Merge "Merge remote-tracking branch 'origin/tmp-84df952' into msm-kona"
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,qsee_irq.txt b/Documentation/devicetree/bindings/arm/msm/qcom,qsee_irq.txt
new file mode 100644
index 0000000..768fc416
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,qsee_irq.txt
@@ -0,0 +1,82 @@
+Binding for the QTI Secure Execution Environment IRQ controller
+===============================================================
+
+The QTI Secure Execution Environment (QSEE) IRQ controller facilitates receiving
+and clearing interrupts from QSEE. Each interrupt from QSEE has a set of control
+registers to mask, clear, and query the status of that interrupt. The controller
+exposes one interrupt, for clients to register with, per bit available in the
+control registers.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be one of:
+		    "qcom,sm8150-qsee-irq",
+		    "qcom,kona-qsee-irq"
+
+- syscon:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: phandle to a syscon node representing the SCSR registers
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: multiple entries specifying the interrupts from QSEE
+
+- interrupt-names:
+	Usage: required
+	Value type: <string>
+	Definition: Interrupt names should be one of the following to map the
+		    interrupt back to the correct registers.
+		    - sp_ipc%d
+		    - sp_rmb
+
+- interrupt-controller:
+	Usage: required
+	Value type: <empty>
+	Definition: Identifies this node as an interrupt controller
+
+- #interrupt-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 3 - for interrupts to encode these properties:
+		    - u32 denoting index of desired interrupt in @interrupts
+		    - u32 denoting bit of interrupt bank
+		    - u32 denoting IRQ flags
+
+= EXAMPLE
+The following example shows the QSEE_IRQ setup with the GLINK SPSS node, defined
+from the sm8150 apps processor's point of view. In this example the GLINK node
+registers for the sp_ipc0 interrupt (index 0 in interrupt-names) and the 0th
+bit of the sp_ipc0 interrupt bank.
+
+sp_scsr_block: syscon@1880000 {
+	compatible = "syscon";
+	reg = <0x1880000 0x10000>;
+};
+
+intsp: qcom,qsee_irq {
+	compatible = "qcom,sm8150-qsee-irq";
+
+	syscon = <&sp_scsr_block>;
+	interrupts = <0 348 IRQ_TYPE_LEVEL_HIGH>,
+		     <0 349 IRQ_TYPE_LEVEL_HIGH>;
+
+	interrupt-names = "sp_ipc0",
+			  "sp_ipc1";
+
+	interrupt-controller;
+	#interrupt-cells = <3>;
+};
+
+spss {
+	...
+	glink {
+		qcom,remote-pid = <8>;
+		mboxes = <&sp_scsr 0>;
+		mbox-names = "spss_spss";
+		interrupts-extended = <&intsp 0 0 IRQ_TYPE_EDGE_RISING>;
+	};
+};
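+
+Equivalently, a client can set an interrupt-parent and use a plain interrupts
+property. A minimal sketch (the client node itself is illustrative, not part of
+the binding):
+
+client {
+	interrupt-parent = <&intsp>;
+	interrupts = <0 0 IRQ_TYPE_EDGE_RISING>;
+};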
+
diff --git a/Documentation/devicetree/bindings/arm/msm/qsee_ipc_irq_bridge.txt b/Documentation/devicetree/bindings/arm/msm/qsee_ipc_irq_bridge.txt
new file mode 100644
index 0000000..442ad52
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qsee_ipc_irq_bridge.txt
@@ -0,0 +1,30 @@
+Qualcomm Technologies, Inc. Secure Execution Environment IPC Interrupt Bridge
+
+[Root level node]
+Required properties:
+-compatible: should be "qcom,qsee-ipc-irq-bridge";
+
+[Second level nodes]
+qcom,qsee-ipc-irq-subsystem
+Required properties:
+-qcom,dev-name: the bridge device name
+-interrupts: IPC interrupt line from remote subsystem to QSEE
+-label: the name of this subsystem
+
+Required properties if interrupt type is IRQ_TYPE_LEVEL_HIGH[4]:
+-qcom,rx-irq-clr : the register to clear the level triggered rx interrupt
+-qcom,rx-irq-clr-mask : the bitmask to clear the rx interrupt
+
+Example:
+
+	qcom,qsee_ipc_irq_bridge {
+		compatible = "qcom,qsee-ipc-irq-bridge";
+
+		qcom,qsee-ipc-irq-spss {
+			qcom,rx-irq-clr = <0x1d08008 0x4>;
+			qcom,rx-irq-clr-mask = <0x2>;
+			qcom,dev-name = "qsee_ipc_irq_spss";
+			interrupts = <0 349 4>;
+			label = "spss";
+		};
+	};
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt
new file mode 100644
index 0000000..a889cbb
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.txt
@@ -0,0 +1,170 @@
+Qualcomm Technologies, Inc. CPUFREQ Bindings
+
+CPUFREQ HW is a hardware engine used by some Qualcomm Technologies, Inc. (QTI)
+SoCs to manage frequency in hardware. It is capable of controlling frequency
+for multiple clusters.
+
+Properties:
+- compatible
+	Usage:		required
+	Value type:	<string>
+	Definition:	must be "qcom,cpufreq-hw".
+
+- clocks
+	Usage:		required
+	Value type:	<phandle> From common clock binding.
+	Definition:	clock handles for the XO clock and the GPLL0 clock.
+
+- clock-names
+	Usage:		required
+	Value type:	<string> From common clock binding.
+	Definition:	must be "xo", "cpu_clk".
+
+- reg
+	Usage:		required
+	Value type:	<prop-encoded-array>
+	Definition:	Addresses and sizes of the HW register bases, one per
+			frequency domain.
+- reg-names
+	Usage:		optional
+	Value type:	<string>
+	Definition:	Frequency domain names, i.e.
+			"freq-domain0", "freq-domain1".
+
+* Property qcom,freq-domain
+Each CPU DT node that uses a frequency domain must set its "qcom,freq-domain"
+property to the phandle of the cpufreq_hw node, followed by the domain ID (0/1)
+and the core count.
+
+Example 1: Dual-cluster, Quad-core per cluster. CPUs within a cluster switch
+DCVS state together.
+
+/ {
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x0>;
+			enable-method = "psci";
+			next-level-cache = <&L2_0>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
+			L2_0: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+				L3_0: l3-cache {
+				      compatible = "cache";
+				};
+			};
+		};
+
+		CPU1: cpu@100 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x100>;
+			enable-method = "psci";
+			next-level-cache = <&L2_100>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
+			L2_100: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU2: cpu@200 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x200>;
+			enable-method = "psci";
+			next-level-cache = <&L2_200>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
+			L2_200: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU3: cpu@300 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x300>;
+			enable-method = "psci";
+			next-level-cache = <&L2_300>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
+			L2_300: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU4: cpu@400 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x400>;
+			enable-method = "psci";
+			next-level-cache = <&L2_400>;
+			qcom,freq-domain = <&cpufreq_hw 1 4>;
+			L2_400: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU5: cpu@500 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x500>;
+			enable-method = "psci";
+			next-level-cache = <&L2_500>;
+			qcom,freq-domain = <&cpufreq_hw 1 4>;
+			L2_500: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU6: cpu@600 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x600>;
+			enable-method = "psci";
+			next-level-cache = <&L2_600>;
+			qcom,freq-domain = <&cpufreq_hw 1 4>;
+			L2_600: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU7: cpu@700 {
+			device_type = "cpu";
+			compatible = "qcom,kryo385";
+			reg = <0x0 0x700>;
+			enable-method = "psci";
+			next-level-cache = <&L2_700>;
+			qcom,freq-domain = <&cpufreq_hw 1 4>;
+			L2_700: l2-cache {
+				compatible = "cache";
+				next-level-cache = <&L3_0>;
+			};
+		};
+	};
+
+ soc {
+	cpufreq_hw: qcom,cpufreq-hw {
+		compatible = "qcom,cpufreq-hw";
+		reg = <0x17d43000 0x1400>, <0x17d45800 0x1400>;
+		reg-names = "freq-domain0", "freq-domain1";
+
+		clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GPLL0>;
+		clock-names = "xo", "cpu_clk";
+
+		#freq-domains-cells = <2>;
+	};
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/msm/mdss-pll.txt b/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
index 4583f4e..d1e7929 100644
--- a/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/display/msm/mdss-pll.txt
@@ -18,7 +18,8 @@
                         "qcom,mdss_dsi_pll_10nm",  "qcom,mdss_dp_pll_8998",
                         "qcom,mdss_hdmi_pll_8998", "qcom,mdss_dp_pll_10nm",
                         "qcom,mdss_dsi_pll_7nm",   "qcom,mdss_dp_pll_7nm",
-			"qcom,mdss_dsi_pll_28lpm"
+			"qcom,mdss_dsi_pll_28lpm", "qcom,mdss_dsi_pll_14nm",
+			"qcom,mdss_dp_pll_14nm"
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
 - reg-names :		names to refer to register sets related to this device
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index 1a680cf..1113039 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -17,7 +17,6 @@
                         "arm,mmu-401"
                         "arm,mmu-500"
                         "cavium,smmu-v2"
-                        "qcom,smmu-v2"
                         "qcom,qsmmu-v500"
 
                   depending on the particular implementation and/or the
diff --git a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
index 6e8a9ab..3a50ad1 100644
--- a/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
+++ b/Documentation/devicetree/bindings/mailbox/qcom,apcs-kpss-global.txt
@@ -12,7 +12,9 @@
 		    "qcom,msm8996-apcs-hmss-global"
 		    "qcom,msm8998-apcs-hmss-global"
 		    "qcom,sdm845-apss-shared"
-
+		    "qcom,sm8150-apcs-hmss-global"
+		    "qcom,sm8150-spcs-global"
+		    "qcom,kona-spcs-global"
 - reg:
 	Usage: required
 	Value type: <prop-encoded-array>
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
new file mode 100644
index 0000000..59651a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -0,0 +1,817 @@
+* Qualcomm Technologies, Inc. MSM CCI
+
+CCI (Camera Control Interface) is a module used for I2C communication with
+camera sensor modules.
+
+=======================
+Required Node Structure
+=======================
+The camera CCI node must be described in two levels of device nodes. The
+first level describes the overall CCI node structure. Second level nodes
+describe the camera sensor submodules that use CCI for I2C communication.
+
+======================================
+First Level Node - CCI device
+======================================
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cci".
+
+- cell-index: cci hardware core index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the Hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: offset and length of the register set
+	for the device, for the CCI operating in
+	compatible mode.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should specify relevant names to each
+	reg property defined.
+
+- interrupts
+  Usage: required
+  Value type: <u32>
+  Definition: Interrupt associated with CCI HW.
+
+- interrupt-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- gpios
+  Usage: required
+  Value type: <phandle>
+  Definition: should specify the gpios to be used for the CCI.
+
+- gpio-req-tbl-num
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+  Usage: required
+  Value type: <string>
+  Definition: should specify the gpio labels in
+	gpio-req-tbl-num property (in the same order)
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CCI HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates in Hz for CCI HW.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: All the different clock levels the node can support.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: all clock phandle and source clocks.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: name for the source clock.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: name of the voltage regulators required for the device.
+
+- gdscr-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain the gdscr regulator used for CCI clocks.
+
+- mmagic-supply
+  Usage: optional
+  Value type: <phandle>
+  Definition: should contain mmagic regulator used for mmagic clocks.
+
+=========================
+CCI clock settings
+=========================
+- I2C speed settings (*)
+  Usage: required
+  Definition: List of I2C rates for CCI HW.
+  - i2c_freq_100Khz
+    Definition: qcom,i2c_standard_mode - node should contain clock settings for
+		100 kHz
+  - i2c_freq_400Khz
+    Definition: qcom,i2c_fast_mode - node should contain clock settings for
+		400 kHz
+  - i2c_freq_custom
+    Definition: qcom,i2c_custom_mode - node can contain clock settings for
+		frequencies other than 100 kHz and 400 kHz, specific to the use case.
+		Currently it has settings for 375 kHz.
+  - i2c_freq_1Mhz
+    Definition: qcom,i2c_fast_plus_mode - node should contain clock
+		settings for 1 MHz
+* If speed settings are not defined, the low level driver can use
+"i2c_freq_custom" as the default.
+
+  - hw-thigh
+    Definition: should contain high period of the SCL clock in terms of CCI clock cycle
+  - hw-tlow
+    Definition: should contain low period of the SCL clock in terms of CCI clock cycle
+  - hw-tsu-sto
+    Definition: should contain setup time for STOP condition
+  - hw-tsu-sta
+    Definition: should contain setup time for Repeated START condition
+  - hw-thd-dat
+    Definition: should contain hold time for the data
+  - hw-thd-sta
+    Definition: should contain hold time for START condition
+  - hw-tbuf
+    Definition: should contain free time between a STOP and a START condition
+  - hw-scl-stretch-en
+    Definition: should indicate whether clock stretching is enabled or disabled
+  - hw-trdhld
+    Definition: should contain internal hold time for SDA
+  - hw-tsp
+    Definition: should contain the glitch filtering setting
+
+Example:
+
+	qcom,cci@fda0c000 {
+		cell-index = <0>;
+		compatible = "qcom,cci";
+		reg = <0xfda0c000 0x300>;
+		reg-names = "cci";
+		interrupts = <0 50 0>;
+		interrupt-names = "cci";
+		clock-names = "camnoc_axi_clk", "soc_ahb_clk",
+		"slow_ahb_src_clk", "cpas_ahb_clk",
+		"cci_clk", "cci_clk_src";
+		clock-rates = <0 0 80000000 0 0 37500000>;
+		clock-cntl-level = "turbo";
+		gpios = <&tlmm 17 0>,
+			   <&tlmm 18 0>,
+			   <&tlmm 19 0>,
+			   <&tlmm 20 0>;
+		gpio-req-tbl-num = <0 1 2 3>;
+		gpio-req-tbl-flags = <1 1 1 1>;
+		gpio-req-tbl-label = "CCI_I2C_DATA0",
+				"CCI_I2C_CLK0",
+				"CCI_I2C_DATA1",
+				"CCI_I2C_CLK1";
+		i2c_freq_100Khz: qcom,i2c_standard_mode {
+			 hw-thigh = <78>;
+			 hw-tlow = <114>;
+			 hw-tsu-sto = <28>;
+			 hw-tsu-sta = <28>;
+			 hw-thd-dat = <10>;
+			 hw-thd-sta = <77>;
+			 hw-tbuf = <118>;
+			 hw-scl-stretch-en = <0>;
+			 hw-trdhld = <6>;
+			 hw-tsp = <1>;
+			 status = "ok";
+		};
+		i2c_freq_400Khz: qcom,i2c_fast_mode {
+			 hw-thigh = <20>;
+			 hw-tlow = <28>;
+			 hw-tsu-sto = <21>;
+			 hw-tsu-sta = <21>;
+			 hw-thd-dat = <13>;
+			 hw-thd-sta = <18>;
+			 hw-tbuf = <25>;
+			 hw-scl-stretch-en = <0>;
+			 hw-trdhld = <6>;
+			 hw-tsp = <3>;
+			 status = "ok";
+		};
+		i2c_freq_custom: qcom,i2c_custom_mode {
+			 hw-thigh = <15>;
+			 hw-tlow = <28>;
+			 hw-tsu-sto = <21>;
+			 hw-tsu-sta = <21>;
+			 hw-thd-dat = <13>;
+			 hw-thd-sta = <18>;
+			 hw-tbuf = <25>;
+			 hw-scl-stretch-en = <1>;
+			 hw-trdhld = <6>;
+			 hw-tsp = <3>;
+			 status = "ok";
+		};
+		i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+			 hw-thigh = <16>;
+			 hw-tlow = <22>;
+			 hw-tsu-sto = <17>;
+			 hw-tsu-sta = <18>;
+			 hw-thd-dat = <16>;
+			 hw-thd-sta = <15>;
+			 hw-tbuf = <19>;
+			 hw-scl-stretch-en = <1>;
+			 hw-trdhld = <3>;
+			 hw-tsp = <3>;
+			 cci-clk-src = <37500000>;
+			 status = "ok";
+		};
+	};
+
+=======================================
+Second Level Node - CAM SENSOR MODULES
+=======================================
+
+=======================================
+CAM SENSOR RESOURCE MANAGER
+=======================================
+The camera sensor resource manager node contains properties of shared camera
+sensor resources.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-res-mgr".
+
+- shared-gpios
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain the gpios which are used by two or more
+	cameras, and these cameras may be opened together.
+
+- pinctrl-names
+  Usage: optional
+  Value type: <string>
+  Definition: List of names to assign the shared pin state defined in pinctrl device node
+
+- pinctrl-<0..n>
+  Usage: optional
+  Value type: <phandle>
+  Definition: Lists phandles each pointing to the pin configuration node within a pin
+	controller. These pin configurations are installed in the pinctrl device node.
+
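+A minimal sketch of a resource manager node (mirroring the combined example at
+the end of this document; the GPIO numbers and pinctrl phandles are
+illustrative):
+
+	qcom,cam-res-mgr {
+		compatible = "qcom,cam-res-mgr";
+		shared-gpios = <18 19>;
+		pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
+		pinctrl-0 = <&cam_res_mgr_active>;
+		pinctrl-1 = <&cam_res_mgr_suspend>;
+		status = "ok";
+	};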
+
+=============================
+CAMERA IMAGE SENSOR MODULE
+=============================
+Image sensor node contains properties of camera image sensor
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-sensor".
+
+- cell-index: cci hardware core index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the Hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: offset and length of the register set
+	for the device, for the CCI operating in
+	compatible mode.
+
+- cci-device
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c device id to be used for this camera
+	sensor
+
+- cci-master
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+	sensor
+	- 0 -> MASTER 0
+	- 1 -> MASTER 1
+
+- csiphy-sd-index
+  Usage: required
+  Value type: <u32>
+  Definition: should contain csiphy instance that will be used to
+	receive sensor data (0, 1, 2, 3).
+
+- cam_vdig-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain regulator from which digital voltage is
+	supplied
+
+- cam_vana-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain regulator from which analog voltage is
+	supplied
+
+- cam_vio-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain regulator from which IO voltage is supplied
+
+- cam_bob-supply
+  Usage: optional
+  Value type: <phandle>
+  Definition: should contain regulator from which BoB voltage is supplied
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: should contain names of all regulators needed by this
+	sensor
+
+- rgltr-cntrl-support
+  Usage: required
+  Value type: <boolean>
+  Definition: This property is required if the regulator parameters are
+	software controlled, e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain minimum voltage level for regulators mentioned
+	in regulator-names property (in the same order)
+
+- rgltr-max-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain maximum voltage level for regulators mentioned
+	in regulator-names property (in the same order)
+
+- rgltr-load-current
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the load current in microamps for regulators
+	mentioned in regulator-names property (in the same order)
+
+- sensor-position-roll
+  Usage: required
+  Value type: <u32>
+  Definition: should contain sensor rotational angle with respect to axis of
+	reference. i.e. 0, 90, 180, 360
+
+- sensor-position-pitch
+  Usage: required
+  Value type: <u32>
+  Definition: should contain sensor rotational angle with respect to axis of
+	reference. i.e. 0, 90, 180, 360
+
+- sensor-position-yaw
+  Usage: required
+  Value type: <u32>
+  Definition: should contain sensor rotational angle with respect to axis of
+	reference. i.e. 0, 90, 180, 360
+
+- qcom,secure
+  Usage: optional
+  Value type: <u32>
+  Definition: should be enabled to operate the camera in secure mode
+
+- gpio-no-mux
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain field to indicate whether gpio mux table is
+	available. i.e. 1 if gpio mux is not available, 0 otherwise
+
+- cam_vaf-supply
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain regulator from which AF voltage is supplied
+
+- pwm-switch
+  Usage: optional
+  Value type: <boolean>
+  Definition: This property is required for regulator to switch into PWM mode.
+
+- gpios
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain phandle to gpio controller node and array of
+	#gpio-cells specifying specific gpio (controller specific)
+
+- gpio-reset
+  Usage: required
+  Value type: <u32>
+  Definition: should contain index to gpio used by sensors reset_n
+
+- gpio-standby
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by sensors standby_n
+
+- gpio-vio
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by sensors io vreg enable
+
+- gpio-vana
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by sensors analog vreg enable
+
+- gpio-vdig
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by sensors digital vreg enable
+
+- gpio-vaf
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by sensors af vreg enable
+
+- gpio-af-pwdm
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by sensors af pwdm_n
+
+- gpio-req-tbl-num
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpios specific to this sensor
+
+- gpio-req-tbl-flags
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain direction of gpios present in
+	gpio-req-tbl-num property (in the same order)
+
+- gpio-req-tbl-label
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain name of gpios present in
+	gpio-req-tbl-num property (in the same order)
+
+- gpio-set-tbl-num
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index of gpios that need to be
+	configured by msm
+
+- gpio-set-tbl-flags
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain value to be configured for the gpios
+	present in gpio-set-tbl-num property (in the same order)
+
+- gpio-set-tbl-delay
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain amount of delay after configuring
+	gpios as specified in gpio-set-tbl-flags property (in the same order)
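+
+  For illustration, a hypothetical set table that drives one GPIO low and then
+  high, with a delay after each setting (the indices, flag values and delays
+  are invented for this sketch):
+
+	gpio-set-tbl-num = <1 1>;
+	gpio-set-tbl-flags = <0 2>;
+	gpio-set-tbl-delay = <1000 1000>;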
+
+- actuator-src
+  Usage: optional
+  Value type: <phandle>
+  Definition: if auto focus is supported by this sensor, this
+	property should contain phandle of respective actuator node
+
+- led-flash-src
+  Usage: optional
+  Value type: <phandle>
+  Definition: if LED flash is supported by this sensor, this
+	property should contain phandle of respective LED flash node
+
+- qcom,vdd-cx-supply
+  Usage: optional
+  Value type: <phandle>
+  Definition: should contain regulator from which cx voltage is supplied
+
+- qcom,vdd-cx-name
+  Usage: optional
+  Value type: <string>
+  Definition: should contain names of cx regulator
+
+- eeprom-src
+  Usage: optional
+  Value type: <phandle>
+  Definition: if eeprom memory is supported by this sensor, this
+	property should contain phandle of respective eeprom nodes
+
+- ois-src
+  Usage: optional
+  Value type: <phandle>
+  Definition: if optical image stabilization is supported by this sensor,
+	this property should contain phandle of respective ois node
+
+- ir-led-src
+  Usage: optional
+  Value type: <phandle>
+  Definition: if ir led is supported by this sensor, this property
+	should contain phandle of respective ir-led node
+
+- qcom,ir-cut-src
+  Usage: optional
+  Value type: <phandle>
+  Definition: if ir cut is supported by this sensor, this property
+	should contain phandle of respective ir-cut node
+
+- qcom,special-support-sensors
+  Usage: required
+  Value type: <string>
+  Definition: if only some special sensors are supported
+	on this board, add sensor name in this property.
+
+- use-shared-clk
+  Usage: optional
+  Value type: <boolean>
+  Definition: Boolean property, required if the clock is shared between
+	different sensor and OIS devices that may need to be opened together.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: clock rate in Hz.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: All the different clock levels the node can support.
+
+- clock-cntl-support
+  Usage: optional
+  Value type: <boolean>
+  Definition: Says whether clock control support is present or not
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: all clock phandle and source clocks.
+
+- clock-control
+  Usage: optional
+  Value type: <string>
+  Definition: The valid fields are "NO_SET_RATE", "INIT_RATE" and
+	"SET_RATE". "NO_SET_RATE" the corresponding clock is enabled without setting
+	the rate assuming some other driver has already set it to appropriate rate.
+	"INIT_RATE" clock rate is not queried assuming some other driver has set
+	the clock rate and ispif will set the the clock to this rate.
+	"SET_RATE" clock is enabled and the rate is set to the value specified
+	in the property clock-rates.
+
+=============================
+ACTUATOR MODULE
+=============================
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,actuator".
+
+- cell-index: cci hardware core index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the Hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: offset and length of the register set
+	for the device, for the CCI operating in
+	compatible mode.
+
+- cci-device
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c device id to be used for this camera
+	sensor
+
+- cci-master
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+	sensor
+	- 0 -> MASTER 0
+	- 1 -> MASTER 1
+
+- cam_vaf-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain regulator from which AF voltage is supplied
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: should contain names of all regulators needed by this
+	actuator. i.e. "cam_vaf"
+
+- rgltr-cntrl-support
+  Usage: optional
+  Value type: <boolean>
+  Definition: Boolean property, required if the regulator control
+	parameters, e.g. rgltr-min-voltage, are specified.
+
+- rgltr-min-voltage
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain minimum voltage level in microvolts
+	for regulators mentioned in regulator-names property (in the same order)
+
+- rgltr-max-voltage
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain maximum voltage level in microvolts
+	for regulators mentioned in regulator-names property (in the same order)
+
+- rgltr-load-current
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain the maximum current in microamps
+	required from the regulators mentioned in the regulator-names property
+	(in the same order).
+
+=============================
+OIS MODULE
+=============================
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,ois".
+
+- cell-index: cci hardware core index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the Hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: offset and length of the register set
+	for the device, for the CCI operating in
+	compatible mode.
+
+- cci-device
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c device id to be used for this camera
+	sensor
+
+- cci-master
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+	sensor
+	- 0 -> MASTER 0
+	- 1 -> MASTER 1
+
+- cam_vaf-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain regulator from which AF voltage is supplied
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: should contain names of all regulators needed by this
+	actuator. i.e. "cam_vaf"
+
+- rgltr-cntrl-support
+  Usage: optional
+  Value type: <boolean>
+  Definition: Boolean property, required if the regulator control
+	parameters, e.g. rgltr-min-voltage, are specified.
+
+- rgltr-min-voltage
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain minimum voltage level in microvolts
+	for regulators mentioned in regulator-names property (in the same order)
+
+- rgltr-max-voltage
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain maximum voltage level in microvolts
+	for regulators mentioned in regulator-names property (in the same order)
+
+- rgltr-load-current
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain the maximum current in microamps
+	required from the regulators mentioned in the regulator-names property
+	(in the same order).
+
+- use-shared-clk
+  Usage: optional
+  Value type: <boolean>
+  Definition: This property is required if the clock is shared between different
+	sensor and OIS devices that need to be opened together.
+
+Example:
+&soc {
+    led_flash0: qcom,camera-flash@0 {
+         cell-index = <0>;
+         compatible = "qcom,camera-flash";
+         flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+         torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+         switch-source = <&pmi8998_switch>;
+         status = "ok";
+    };
+};
+
+&cam_cci0 {
+    actuator0: qcom,actuator@0 {
+         cell-index = <0>;
+         reg = <0x0>;
+         compatible = "qcom,actuator";
+         cci-device = <0>;
+         cci-master = <0>;
+         cam_vaf-supply = <&pmi8998_bob>;
+         regulator-names = "cam_vaf";
+         rgltr-cntrl-support;
+         rgltr-min-voltage = <2800000>;
+         rgltr-max-voltage = <2800000>;
+         rgltr-load-current = <100000>;
+    };
+
+    ois0: qcom,ois@0 {
+         cell-index = <0>;
+         reg = <0x0>;
+         compatible = "qcom,ois";
+         cci-device = <0>;
+         cci-master = <0>;
+         cam_vaf-supply = <&pmi8998_bob>;
+         regulator-names = "cam_vaf";
+         rgltr-cntrl-support;
+         rgltr-min-voltage = <2800000>;
+         rgltr-max-voltage = <2800000>;
+         rgltr-load-current = <100000>;
+    };
+
+    qcom,cam-res-mgr {
+         compatible = "qcom,cam-res-mgr";
+         status = "ok";
+         shared-gpios = <18 19>;
+         pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
+         pinctrl-0 = <&cam_shared_clk_active &cam_res_mgr_active>;
+         pinctrl-1 = <&cam_shared_clk_suspend &cam_res_mgr_suspend>;
+    };
+
+    qcom,cam-sensor@0 {
+         cell-index = <0>;
+         compatible = "qcom,camera";
+         reg = <0x0>;
+         csiphy-sd-index = <0>;
+         sensor-position-roll = <90>;
+         sensor-position-pitch = <0>;
+         sensor-position-yaw = <180>;
+         qcom,secure = <1>;
+         led-flash-src = <&led_flash0>;
+         actuator-src = <&actuator0>;
+         ois-src = <&ois0>;
+         eeprom-src = <&eeprom0>;
+         cam_vdig-supply = <&pm8009_l2>;
+         cam_vio-supply = <&pm8009_l1>;
+         cam_vana-supply = <&pm8009_l5>;
+         cam_bob-supply = <&pm8150l_bob>;
+         cam_clk-supply = <&titan_top_gdsc>;
+         regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+                "cam_clk", "cam_bob";
+         rgltr-cntrl-support;
+         pwm-switch;
+         rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+         rgltr-max-voltage = <0 2800000 1200000 0 4000000>;
+         rgltr-load-current = <0 80000 1200000 0 2000000>;
+         gpio-no-mux = <0>;
+         pinctrl-names = "cam_default", "cam_suspend";
+         pinctrl-0 = <&cam_sensor_mclk0_active
+                   &cam_sensor_rear_active>;
+         pinctrl-1 = <&cam_sensor_mclk0_suspend
+                   &cam_sensor_rear_suspend>;
+         gpios = <&tlmm 13 0>,
+              <&tlmm 80 0>,
+              <&tlmm 79 0>;
+         gpio-reset = <1>;
+         gpio-standby = <2>;
+         gpio-req-tbl-num = <0 1 2>;
+         gpio-req-tbl-flags = <1 0 0>;
+         gpio-req-tbl-label = "CAMIF_MCLK0",
+                         "CAM_RESET0",
+                         "CAM_VANA";
+         sensor-position = <0>;
+         sensor-mode = <0>;
+         cci-device = <0>;
+         cci-master = <0>;
+         status = "ok";
+         use-shared-clk;
+         clocks = <&clock_mmss clk_mclk0_clk_src>,
+               <&clock_mmss clk_camss_mclk0_clk>;
+         clock-names = "cam_src_clk", "cam_clk";
+         clock-cntl-leveli = "turbo";
+         clock-rates = <24000000>;
+    };
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
new file mode 100644
index 0000000..49194392
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cdm.txt
@@ -0,0 +1,155 @@
+* Qualcomm Technologies, Inc. MSM Camera CDM
+
+CDM (Camera Data Mover) is a module intended to provide a means of fast
+programming of camera registers and lookup tables.
+
+=======================
+Required Node Structure
+=======================
+The CDM interface node takes care of handling the CDM HW nodes and provides an
+interface for camera clients.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cdm-intf".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cam-cdm-intf".
+
+- num-hw-cdm
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported HW blocks.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM interface.
+
+Example:
+	qcom,cam-cdm-intf {
+		compatible = "qcom,cam-cdm-intf";
+		label = "cam-cdm-intf";
+		num-hw-cdm = <1>;
+		cdm-client-names = "vfe",
+			"jpeg-dma",
+			"jpeg",
+			"fd";
+	};
+
+=======================
+Required Node Structure
+=======================
+The CDM HW node provides an interface for camera clients through the CDM
+interface node.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam480-cpas-cdm0", "qcom,cam480-cpas-cdm1"
+              or "qcom,cam480-cpas-cdm2".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas-cdm".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+	camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CDM HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CDM HW.
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+	in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CDM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CDM HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- cdm-client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CDM HW node.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+Example:
+	qcom,cpas-cdm0@ac48000 {
+		cell-index = <0>;
+		compatible = "qcom,cam480-cpas-cdm0";
+		label = "cpas-cdm0";
+		reg = <0xac48000 0x1000>;
+		reg-names = "cpas-cdm";
+		interrupts = <0 461 0>;
+		interrupt-names = "cpas-cdm";
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"titan_top_ahb_clk",
+			"cam_axi_clk",
+			"camcc_slow_ahb_clk_src",
+			"cpas_top_ahb_clk",
+			"camnoc_axi_clk";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		clock-rates = <0 80000000 80000000 80000000 80000000 80000000>;
+		cdm-client-names = "ife";
+		clock-cntl-level = "turbo";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
new file mode 100644
index 0000000..0815873
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cpas.txt
@@ -0,0 +1,331 @@
+* Qualcomm Technologies, Inc. MSM Camera CPAS
+
+The MSM camera CPAS device provides dependency definitions for
+enabling the camera CPAS HW and provides the client definitions
+for all HW blocks that use the CPAS driver for BW voting. These
+definitions consist of various properties that define the list
+of clients supported and the AHB and AXI master-slave IDs used
+for BW voting.
+
+=======================
+Required Node Structure
+=======================
+The camera CPAS device must be described in four levels of device nodes. The
+first level describes the overall CPAS device. Within it, second level nodes
+describe the list of AXI ports that map different clients for AXI BW voting.
+Third level nodes describe the details of each AXI port: its name and its mnoc
+and camnoc AXI bus information. Fourth level nodes describe the details of the
+bus master-slave IDs and the ab/ib values for the mnoc and camnoc bus
+interfaces.
+
+==================================
+First Level Node - CAM CPAS device
+==================================
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-cpas".
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "cpas_top" or "camss_top".
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: Offset of the register space relative to the
+	camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with CAMNOC HW.
+
+- qcom,cpas-hw-ver
+  Usage: required
+  Value type: <u32>
+  Definition: CAM HW Version information.
+
+- camnoc-axi-min-ib-bw
+  Usage: optional
+  Value type: <u64>
+  Definition: Min camnoc axi bw for the given target.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for CPAS HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+	in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CPAS HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for CPAS HW.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to the clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+- control-camnoc-axi-clk
+  Usage: optional
+  Value type: <empty>
+  Definition: Bool property specifying whether to control camnoc axi
+              clock from cpas driver.
+
+- camnoc-bus-width
+  Usage: required if control-camnoc-axi-clk is enabled
+  Value type: <u32>
+  Definition: camnoc bus width.
+
+- camnoc-axi-clk-bw-margin-perc
+  Usage: optional
+  Value type: <u32>
+  Definition: Percentage value to be added to camnoc bw while calculating
+              camnoc axi clock frequency.
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer to Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- vdd-corners
+  Usage: required
+  Value type: <u32>
+  Definition: List of vdd corners to map for ahb level.
+
+- vdd-corner-ahb-mapping
+  Usage: required
+  Value type: <string>
+  Definition: List of ahb level strings corresponding to vdd-corners.
+  Supported strings: suspend, svs, nominal, turbo
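+
+  For illustration, a hypothetical four-corner mapping (the corner values are
+  invented for this sketch):
+
+	vdd-corners = <1 2 3 4>;
+	vdd-corner-ahb-mapping = "suspend", "svs", "nominal", "turbo";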
+
+- client-id-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether CPAS clients are ID based.
+
+- client-names
+  Usage: required
+  Value type: <string>
+  Definition: List of Clients supported by CPAS.
+
+- client-axi-port-names
+  Usage: required
+  Value type: <string>
+  Definition: AXI Port name for each client.
+
+- client-bus-camnoc-based
+  Usage: required
+  Value type: <empty>
+  Definition: Bool property specifying whether Clients are connected
+	through CAMNOC for AXI access.
+
+===================================================================
+Third Level Node - CAM AXI Port properties
+===================================================================
+- qcom,axi-port-name
+  Usage: required
+  Value type: <string>
+  Definition: Name of the AXI Port.
+
+===================================================================
+Fourth Level Node - CAM AXI Bus properties
+===================================================================
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+  Please refer to Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+  for the properties above.
+
+- qcom,msm-bus-vector-dyn-vote
+  Usage: optional
+  Value type: <empty>
+  Definition: Bool property specifying whether this bus client
+	is dynamic vote based.
+
+Example:
+
+	qcom,cam-cpas@ac40000 {
+		cell-index = <0>;
+		compatible = "qcom,cam-cpas";
+		label = "cpas";
+		arch-compat = "cpas_top";
+		status = "ok";
+		reg-names = "cam_cpas_top", "cam_camnoc";
+		reg = <0xac40000 0x1000>,
+			<0xac42000 0x5000>;
+		reg-cam-base = <0x40000 0x42000>;
+		interrupt-names = "cpas_camnoc";
+		interrupts = <0 459 0>;
+		qcom,cpas-hw-ver = <0x170100>; /* Titan v170 v1.0.0 */
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"camnoc_axi_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>;
+		src-clock-name = "slow_ahb_clk_src";
+		clock-rates = <0 0 0 0 80000000 0>;
+		clock-cntl-level = "turbo";
+		control-camnoc-axi-clk;
+		camnoc-bus-width = <32>;
+		camnoc-axi-clk-bw-margin-perc = <10>;
+		qcom,msm-bus,name = "cam_ahb";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 0>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 300000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>,
+			<MSM_BUS_MASTER_AMPSS_M0
+			MSM_BUS_SLAVE_CAMERA_CFG 0 640000>;
+		client-id-based;
+		client-names =
+			"ife0", "ife1", "ife2", "ipe0",
+			"ipe1", "cam-cdm-intf0", "cpas-cdm0", "bps0",
+			"icp0", "jpeg-dma0", "jpeg0", "fd0";
+		client-axi-port-names =
+			"cam_hf_1", "cam_hf_2", "cam_hf_2", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1",
+			"cam_sf_1", "cam_sf_1", "cam_sf_1", "cam_sf_1";
+		client-bus-camnoc-based;
+		qcom,axi-port-list {
+			qcom,axi-port1 {
+				qcom,axi-port-name = "cam_hf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port2 {
+				qcom,axi-port-name = "cam_hf_2";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_hf_2_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_hf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_HF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+			qcom,axi-port3 {
+				qcom,axi-port-name = "cam_sf_1";
+				qcom,axi-port-mnoc {
+					qcom,msm-bus,name = "cam_sf_1_mnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+				qcom,axi-port-camnoc {
+					qcom,msm-bus,name = "cam_sf_1_camnoc";
+					qcom,msm-bus-vector-dyn-vote;
+					qcom,msm-bus,num-cases = <2>;
+					qcom,msm-bus,num-paths = <1>;
+					qcom,msm-bus,vectors-KBps =
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>,
+						<MSM_BUS_MASTER_CAMNOC_SF
+						MSM_BUS_SLAVE_EBI_CH0 0 0>;
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
new file mode 100644
index 0000000..0b00cc0
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
@@ -0,0 +1,117 @@
+* Qualcomm Technologies, Inc. MSM CSI Phy
+
+=======================
+Required Node Structure
+=======================
+The camera CSIPHY node must be described in the first level of device nodes.
+The first level describes the overall CSIPHY node structure.
+
+======================================
+First Level Node - CSIPHY device
+======================================
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,csiphy-v1.0",
+	"qcom,csiphy-v1.1", "qcom,csiphy-v1.2",
+	"qcom,csiphy-v2.0", "qcom,csiphy".
+
+- cell-index: csiphy hardware core index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the Hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: offset and length of the register set
+	for the device, for the CSIPHY operating in
+	compatible mode.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should specify relevant names to each
+	reg property defined.
+
+- reg-cam-base
+  Usage: required
+  Value type: <u32>
+  Definition: offset of CSIPHY in the camera HW block
+
+- interrupts
+  Usage: required
+  Value type: <u32>
+  Definition: Interrupt associated with CSIPHY HW.
+
+- interrupt-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for CSIPHY HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clock rates in Hz for CSIPHY HW.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: All the different clock levels the node can support.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: all clock phandle and source clocks.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: name of the voltage regulators required for the device.
+
+- gdscr-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain the gdscr regulator used for CSIPHY clocks.
+
+- mipi-csi-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: should contain phandle for mipi-csi-vdd regulator used for
+	CSIPHY device.
+
+- csi-vdd-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain required voltage for csi-vdd supply for CSIPHY.
+
+Example:
+
+qcom,csiphy@ac65000 {
+     cell-index = <0>;
+     compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+     reg = <0xac65000 0x200>;
+     reg-cam-base = <0x65000>;
+     reg-names = "csiphy";
+     interrupts = <0 477 0>;
+     interrupt-names = "csiphy";
+     regulator-names = "gdscr", "refgen";
+     mipi-csi-vdd-supply = <&pm8998_l1>;
+     csi-vdd-voltage = <1200000>;
+     gdscr-supply = <&titan_top_gdsc>;
+     clocks = <&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>,
+              <&clock_camcc CAM_CC_CSIPHY0_CLK>,
+              <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK_SRC>,
+              <&clock_camcc CAM_CC_CSI0PHYTIMER_CLK>;
+     clock-names = "cphy_rx_clk_src", "csiphy0_clk",
+              "csi0phytimer_clk_src", "csi0phytimer_clk";
+     clock-rates = <400000000 0 300000000 0>;
+     clock-cntl-level = "turbo";
+     status = "ok";
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
new file mode 100644
index 0000000..d77f337
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-eeprom.txt
@@ -0,0 +1,503 @@
+* Qualcomm Technologies, Inc. MSM EEPROM
+
+EEPROM is a one-time programmable (OTP) device that stores the calibration data
+used for the camera sensor. It may be integrated either in the sensor module or
+in the sensor itself. As a result, the power, clock and GPIOs may be the same as
+those of the camera sensor. The following describes the page block map, power
+supply, clock, GPIO and power on sequence properties of the EEPROM device.
+
+=======================================================
+Required Node Structure if probe happens from userspace
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,eeprom".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for EEPROM HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+		"regulator-names".
+
+- rgltr-cntrl-support
+  Usage: required
+  Value type: <bool>
+  Definition: This property specifies if the regulator control is supported
+		e.g. via rgltr-min-voltage.
+
+- rgltr-min-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain minimum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-max-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain maximum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-load-current
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the maximum current in microamps required for
+		the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio mux type.
+
+- gpios
+  Usage: required
+  Value type: <phandle>
+  Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the reset gpio index.
+
+- gpio-standby
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+  Usage: required
+  Value type: <string>
+  Definition: should specify the gpio labels.
+
+- sensor-position
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the mount angle of the camera sensor.
+
+- cci-device
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c device id to be used for this camera
+	sensor
+
+- cci-master
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+		sensor.
+
+- sensor-mode
+  Usage: required
+  Value type: <u32>
+  Definition: should contain sensor mode supported.
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for EEPROM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for EEPROM HW.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: specifies the different clock levels the eeprom node supports.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+Example:
+
+	eeprom0: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		compatible = "qcom,eeprom";
+		cam_vdig-supply = <&pm8998_l5>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		regulator-names = "cam_vdig", "cam_vio";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1200000 0>;
+		rgltr-max-voltage = <1200000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		gpio-no-mux = <0>;
+		gpios = <&msmgpio 26 0>,
+			<&msmgpio 37 0>,
+			<&msmgpio 36 0>;
+		gpio-reset = <1>;
+		gpio-standby = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET1",
+			"CAM_STANDBY";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-device = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-names = "cam_clk";
+		clock-cntl-level = "turbo";
+		clock-rates = <24000000>;
+	};
+
+=======================================================
+Required Node Structure if probe happens from kernel
+=======================================================
+The EEPROM device is described in one level of the device node.
+
+======================================
+First Level Node - CAM EEPROM device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,eeprom".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- qcom,eeprom-name
+  Usage: required
+  Value type: <string>
+  Definition: Name of the EEPROM HW.
+
+- qcom,slave-addr
+  Usage: required
+  Value type: <u32>
+  Definition: Slave address of the EEPROM HW.
+
+- qcom,num-blocks
+  Usage: required
+  Value type: <u32>
+  Definition: Total number of blocks that the EEPROM contains.
+
+- qcom,pageX
+  Usage: required
+  Value type: <u32>
+  Definition: List of values specifying page size, start address,
+		address type, data, data type, delay in ms.
+		size 0 stands for non-paged.
+
+- qcom,pollX
+  Usage: required
+  Value type: <u32>
+  Definition: List of values specifying poll size, poll reg address,
+		address type, data, data type, delay in ms.
+		size 0 stands for not used.
+
+- qcom,memX
+  Usage: required
+  Value type: <u32>
+  Definition: List of values specifying memory size, start address,
+		address type, data, data type, delay in ms.
+		size 0 stands for not used.
+
+- qcom,saddrX
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the slave address for block (%d).
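+
+For illustration, the qcom,pageX/pollX/memX tuples decode according to the
+field order defined above (values taken from the example below; the inline
+comments are annotations only):
+
+	/* <size  start-addr  addr-type  data  data-type  delay-ms> */
+	qcom,page0 = <1 0x100 2 0x01 1 1>;	/* size 1, addr 0x100, data 0x01, 1 ms delay */
+	qcom,mem0 = <0 0x0 2 0 1 0>;		/* size 0: block not used */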
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for EEPROM HW.
+
+- qcom,cmm-data-support
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data capability flag.
+
+- qcom,cmm-data-compressed
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data compression flag.
+
+- qcom,cmm-data-offset
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data start offset.
+
+- qcom,cmm-data-size
+  Usage: required
+  Value type: <u32>
+  Definition: Camera MultiModule data size.
+
+- qcom,cam-power-seq-type
+  Usage: required
+  Value type: <string>
+  Definition: should specify the power on sequence types.
+
+- qcom,cam-power-seq-val
+  Usage: required
+  Value type: <string>
+  Definition: should specify the power on sequence values.
+
+- qcom,cam-power-seq-cfg-val
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the power on sequence config values.
+
+- qcom,cam-power-seq-delay
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the power on sequence delay time in ms.
+
+- spiop-read
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read operation related data.
+
+- spiop-readseq
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read sequence operation related data.
+
+- spiop-queryid
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI query eeprom id operation related data.
+
+- spiop-pprog
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI page program operation related data.
+
+- spiop-wenable
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI write enable operation related data.
+
+- spiop-readst
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI read status operation related data.
+
+- spiop-erase
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides SPI erase operation related data.
+
+- eeprom-idx
+  Usage: required
+  Value type: <u32>
+  Definition: this array provides eeprom id related data.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+		"regulator-names".
+
+- rgltr-cntrl-support
+  Usage: required
+  Value type: <bool>
+  Definition: This property specifies if the regulator control is supported
+		e.g. rgltr-min-voltage.
+
+- rgltr-min-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain minimum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-max-voltage
+  Usage: required
+  Value type: <u32>
+  Definition: should contain maximum voltage level for regulators
+		mentioned in regulator-names property.
+
+- rgltr-load-current
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the maximum current in microamps required for
+		the regulators mentioned in regulator-names property.
+
+- gpio-no-mux
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio mux type.
+
+- gpios
+  Usage: required
+  Value type: <phandle>
+  Definition: should specify the gpios to be used for the eeprom.
+
+- gpio-reset
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the reset gpio index.
+
+- gpio-standby
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the standby gpio index.
+
+- gpio-req-tbl-num
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+  Usage: required
+  Value type: <u32>
+  Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+  Usage: required
+  Value type: <string>
+  Definition: should specify the gpio labels.
+
+- sensor-position
+  Usage: required
+  Value type: <u32>
+  Definition: should contain the mount angle of the camera sensor.
+
+- cci-device
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c device id to be used for this camera
+		sensor.
+
+- cci-master
+  Usage: required
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+		sensor.
+
+- sensor-mode
+  Usage: required
+  Value type: <u32>
+  Definition: should contain sensor mode supported.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of clock control levels supported by the EEPROM node.
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for EEPROM HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for EEPROM HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+Example:
+
+	eeprom0: qcom,eeprom@0 {
+		cell-index = <0>;
+		reg = <0x0>;
+		qcom,eeprom-name = "msm_eeprom";
+		eeprom-id0 = <0xF8 0x15>;
+		eeprom-id1 = <0xEF 0x15>;
+		eeprom-id2 = <0xC2 0x36>;
+		eeprom-id3 = <0xC8 0x15>;
+		compatible = "qcom,eeprom";
+		qcom,slave-addr = <0x60>;
+		qcom,num-blocks = <2>;
+		qcom,page0 = <1 0x100 2 0x01 1 1>;
+		qcom,poll0 = <0 0x0 2 0 1 1>;
+		qcom,mem0 = <0 0x0 2 0 1 0>;
+		qcom,page1 = <1 0x0200 2 0x8 1 1>;
+		qcom,pageen1 = <1 0x0202 2 0x01 1 10>;
+		qcom,poll1 = <0 0x0 2 0 1 1>;
+		qcom,mem1 = <32 0x3000 2 0 1 0>;
+		qcom,saddr1 = <0x62>;
+		qcom,cmm-data-support;
+		qcom,cmm-data-compressed;
+		qcom,cmm-data-offset = <0>;
+		qcom,cmm-data-size = <0>;
+		spiop-read = <0x03 3 0 0 0>;
+		spiop-readseq = <0x03 3 0 0 0>;
+		spiop-queryid = <0x90 3 0 0 0>;
+		spiop-pprog = <0x02 3 0 3 100>;
+		spiop-wenable = <0x06 0 0 0 0>;
+		spiop-readst = <0x05 0 0 0 0>;
+		spiop-erase = <0x20 3 0 10 100>;
+		qcom,cam-power-seq-type = "sensor_vreg",
+			"sensor_vreg", "sensor_clk",
+			"sensor_gpio", "sensor_gpio";
+		qcom,cam-power-seq-val = "cam_vdig",
+			"cam_vio", "sensor_cam_mclk",
+			"sensor_gpio_reset",
+			"sensor_gpio_standby";
+		qcom,cam-power-seq-cfg-val = <1 1 24000000 1 1>;
+		qcom,cam-power-seq-delay = <1 1 5 5 10>;
+		cam_vdig-supply = <&pm8998_l5>;
+		cam_vio-supply = <&pm8998_lvs1>;
+		regulator-names = "cam_vdig", "cam_vio";
+		rgltr-cntrl-support;
+		rgltr-min-voltage = <1200000 0>;
+		rgltr-max-voltage = <1200000 0>;
+		rgltr-load-current = <0 80000 105000 0>;
+		qcom,gpio-no-mux = <0>;
+		gpios = <&msmgpio 26 0>,
+			<&msmgpio 37 0>,
+			<&msmgpio 36 0>;
+		gpio-reset = <1>;
+		gpio-standby = <2>;
+		gpio-req-tbl-num = <0 1 2>;
+		gpio-req-tbl-flags = <1 0 0>;
+		gpio-req-tbl-label = "CAMIF_MCLK",
+			"CAM_RESET1",
+			"CAM_STANDBY";
+		sensor-position = <0>;
+		sensor-mode = <0>;
+		cci-device = <0>;
+		cci-master = <0>;
+		status = "ok";
+		clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+		clock-cntl-level = "turbo";
+		clock-names = "cam_clk";
+		clock-rates = <24000000>;
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-fd.txt b/Documentation/devicetree/bindings/media/video/msm-cam-fd.txt
new file mode 100644
index 0000000..51b0bab
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-fd.txt
@@ -0,0 +1,154 @@
+* Qualcomm Technologies, Inc. MSM Camera FD
+
+The MSM camera Face Detection device provides dependency definitions
+for enabling Camera FD HW. MSM camera FD is implemented in multiple
+device nodes. The root FD device node has properties defined to hint
+the driver about the FD HW nodes available during the probe sequence.
+Each node has multiple properties defined for interrupts, clocks and
+regulators.
+
+=======================
+Required Node Structure
+=======================
+The FD root interface node handles the high-level Face Detection driver
+and controls the underlying FD hardware present.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-fd".
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,fd".
+
+- num-fd
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported FD HW blocks.
+
+Example:
+	qcom,cam-fd {
+		compatible = "qcom,cam-fd";
+		compat-hw-name = "qcom,fd";
+		num-fd = <1>;
+	};
+
+=======================
+Required Node Structure
+=======================
+The FD node describes to the Face Detection hardware driver
+the device register map, interrupt map, clocks and regulators.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be one of "qcom,fd41", "qcom,fd501",
+              "qcom,fd600".
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space compared to
+		the Camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt line associated with FD HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for FD HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+		in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for FD HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks required for FD HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-control-debugfs
+  Usage: optional
+  Value type: <string>
+  Definition: Enable/Disable clk rate control.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+Examples:
+	cam_fd: qcom,fd@ac5a000 {
+		cell-index = <0>;
+		compatible = "qcom,fd600";
+		reg-names = "fd_core", "fd_wrapper";
+		reg = <0xac5a000 0x1000>,
+			<0xac5b000 0x400>;
+		reg-cam-base = <0x5a000 0x5b000>;
+		interrupt-names = "fd";
+		interrupts = <0 462 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "gcc_ahb_clk",
+			"gcc_axi_clk",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"fd_core_clk_src",
+			"fd_core_clk",
+			"fd_core_uar_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_FD_CORE_CLK_SRC>,
+			<&clock_camcc CAM_CC_FD_CORE_CLK>,
+			<&clock_camcc CAM_CC_FD_CORE_UAR_CLK>;
+		src-clock-name = "fd_core_clk_src";
+		clock-cntl-level = "svs";
+		clock-rates = <0 0 0 0 0 400000000 0 0>;
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
new file mode 100644
index 0000000..1d92897
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-icp.txt
@@ -0,0 +1,261 @@
+* Qualcomm Technologies, Inc. MSM Camera ICP
+
+The MSM camera ICP devices are implemented in multiple device nodes.
+The root ICP device node has properties defined to hint the driver
+about the number of A5, IPE and BPS nodes available during the
+probe sequence. Each node has multiple properties defined
+for interrupts, clocks and regulators.
+
+=======================
+Required Node Structure
+=======================
+The ICP root interface node accounts for the number of A5, IPE and BPS
+devices present on the hardware.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-icp".
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,a5" or "qcom,ipe0" or "qcom,ipe1" or "qcom,bps".
+
+- num-a5
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported A5 processors.
+
+- num-ipe
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported IPE HW blocks.
+
+- num-bps
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported BPS HW blocks.
+
+Example:
+	qcom,cam-icp {
+		compatible = "qcom,cam-icp";
+		compat-hw-name = "qcom,a5", "qcom,ipe0", "qcom,ipe1", "qcom,bps";
+		num-a5 = <1>;
+		num-ipe = <2>;
+		num-bps = <1>;
+		status = "ok";
+	};
+
+=======================
+Required Node Structure
+=======================
+The A5/IPE/BPS nodes describe to the Image Control Processor driver
+the A5 register map, interrupt map, clocks, regulators
+and the name of the firmware image.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-a5" or "qcom,cam-ipe" or "qcom,cam-bps".
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space compared to
+		the Camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with ICP HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for ICP HW.
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+		in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for ICP HW.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-control-debugfs
+  Usage: optional
+  Value type: <string>
+  Definition: Enable/Disable clk rate control.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for ICP HW.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to clock-rates levels.
+  Supported strings: lowsvs, svs, svs_l1, nominal, turbo.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- fw_name
+  Usage: optional
+  Value type: <string>
+  Definition: Name of firmware image.
+
+- ubwc-cfg
+  Usage: required
+  Value type: <u32>
+  Definition: UBWC configuration.
+
+Examples:
+a5: qcom,a5@ac00000 {
+	cell-index = <0>;
+	compatible = "qcom,cam-a5";
+	reg = <0xac00000 0x6000>,
+		<0xac10000 0x8000>,
+		<0xac18000 0x3000>;
+	reg-names = "a5_qgic", "a5_sierra", "a5_csr";
+	interrupts = <0 463 0>;
+	interrupt-names = "a5";
+	regulator-names = "camss-vdd";
+	camss-vdd-supply = <&titan_top_gdsc>;
+	clock-names = "gcc_cam_ahb_clk",
+		"gcc_cam_axi_clk",
+		"soc_ahb_clk",
+		"cpas_ahb_clk",
+		"camnoc_axi_clk",
+		"icp_apb_clk",
+		"icp_atb_clk",
+		"icp_clk",
+		"icp_clk_src",
+		"icp_cti_clk",
+		"icp_ts_clk";
+	clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_ICP_APB_CLK>,
+			<&clock_camcc CAM_CC_ICP_ATB_CLK>,
+			<&clock_camcc CAM_CC_ICP_CLK>,
+			<&clock_camcc CAM_CC_ICP_CLK_SRC>,
+			<&clock_camcc CAM_CC_ICP_CTI_CLK>,
+			<&clock_camcc CAM_CC_ICP_TS_CLK>;
+
+	clock-rates = <0 0 0 80000000 0 0 0 0 600000000 0 0>;
+	clock-cntl-level = "turbo";
+	fw_name = "CAMERA_ICP.elf";
+	ubwc-cfg = <0x7F 0x1FF>;
+};
+
+qcom,ipe0 {
+	cell-index = <0>;
+	compatible = "qcom,cam-ipe";
+	regulator-names = "ipe0-vdd";
+	ipe0-vdd-supply = <&ipe_0_gdsc>;
+	clock-names = "ipe_0_ahb_clk",
+		"ipe_0_areg_clk",
+		"ipe_0_axi_clk",
+		"ipe_0_clk",
+		"ipe_0_clk_src";
+	src-clock-name = "ipe_0_clk_src";
+	clocks = <&clock_camcc CAM_CC_IPE_0_AHB_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_AREG_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_AXI_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_CLK>,
+			<&clock_camcc CAM_CC_IPE_0_CLK_SRC>;
+
+	clock-rates = <0 0 0 0 240000000>,
+		<0 0 0 0 404000000>,
+		<0 0 0 0 480000000>,
+		<0 0 0 0 538000000>,
+		<0 0 0 0 600000000>;
+	clock-cntl-level = "lowsvs", "svs",
+		"svs_l1", "nominal", "turbo";
+};
+
+qcom,ipe1 {
+	cell-index = <1>;
+	compatible = "qcom,cam-ipe";
+	regulator-names = "ipe1-vdd";
+	ipe1-vdd-supply = <&ipe_1_gdsc>;
+	clock-names = "ipe_1_ahb_clk",
+		"ipe_1_areg_clk",
+		"ipe_1_axi_clk",
+		"ipe_1_clk",
+		"ipe_1_clk_src";
+	src-clock-name = "ipe_1_clk_src";
+	clocks = <&clock_camcc CAM_CC_IPE_1_AHB_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_AREG_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_AXI_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_CLK>,
+			<&clock_camcc CAM_CC_IPE_1_CLK_SRC>;
+
+	clock-rates = <0 0 0 0 240000000>,
+		<0 0 0 0 404000000>,
+		<0 0 0 0 480000000>,
+		<0 0 0 0 538000000>,
+		<0 0 0 0 600000000>;
+	clock-cntl-level = "lowsvs", "svs",
+		"svs_l1", "nominal", "turbo";
+};
+
+bps: qcom,bps {
+	cell-index = <0>;
+	compatible = "qcom,cam-bps";
+	regulator-names = "bps-vdd";
+	bps-vdd-supply = <&bps_gdsc>;
+	clock-names = "bps_ahb_clk",
+		"bps_areg_clk",
+		"bps_axi_clk",
+		"bps_clk",
+		"bps_clk_src";
+	src-clock-name = "bps_clk_src";
+	clocks = <&clock_camcc CAM_CC_BPS_AHB_CLK>,
+			<&clock_camcc CAM_CC_BPS_AREG_CLK>,
+			<&clock_camcc CAM_CC_BPS_AXI_CLK>,
+			<&clock_camcc CAM_CC_BPS_CLK>,
+			<&clock_camcc CAM_CC_BPS_CLK_SRC>;
+
+	clock-rates = <0 0 0 0 200000000>,
+		<0 0 0 0 404000000>,
+		<0 0 0 0 480000000>,
+		<0 0 0 0 600000000>,
+		<0 0 0 0 600000000>;
+	clock-cntl-level = "lowsvs", "svs",
+		"svs_l1", "nominal", "turbo";
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
new file mode 100644
index 0000000..c1b97a5
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-ife-csid.txt
@@ -0,0 +1,121 @@
+* Qualcomm Technologies, Inc. MSM Camera IFE CSID
+
+Camera IFE CSID device provides the definitions for enabling
+the IFE CSID hardware. It also provides the functions for the client
+to control the IFE CSID hardware.
+
+=======================
+Required Node Structure
+=======================
+The IFE CSID device is described in one level of the device node.
+
+======================================
+First Level Node - CAM IFE CSID device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,csid170", "qcom,csid175", "qcom,csid480",
+              "qcom,csid-lite170", "qcom,csid-lite175"
+              or "qcom,csid-lite480".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should be "csid".
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- interrupt-names
+  Usage: Required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: Required
+  Value type: <u32>
+  Definition: Interrupt associated with IFE CSID HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for IFE CSID HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+		"regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for IFE CSID HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for IFE CSID HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of clock levels that the node can support.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-control-debugfs
+  Usage: optional
+  Value type: <string>
+  Definition: Enable/Disable clk rate control.
+
+Example:
+
+	qcom,csid0@acb3000 {
+		cell-index = <0>;
+		compatible = "qcom,csid480";
+		reg = <0xacb3000 0x1000>;
+		reg-names = "csid";
+		interrupts = <0 464 0>;
+		interrupt-names = "csid";
+		vdd-names = "camss", "ife0";
+		camss-supply = <&titan_top_gdsc>;
+		ife0-supply = <&ife_0_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"ife_cphy_rx_clk",
+			"cphy_rx_clk_src";
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CPHY_RX_CLK>,
+			<&clock_camcc CAM_CC_CPHY_RX_CLK_SRC>;
+		clock-rates = <0 0 80000000 0 320000000 0 384000000 0 384000000>;
+		src-clock-name = "ife_csid_clk_src";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
new file mode 100644
index 0000000..13aae64
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-isp.txt
@@ -0,0 +1,31 @@
+* Qualcomm Technologies, Inc. MSM Camera ISP
+
+The MSM camera ISP driver provides the definitions for enabling
+the Camera ISP hardware. It provides the functions for the client to
+control the ISP hardware.
+
+=======================
+Required Node Structure
+=======================
+The camera ISP device is described in one level of device node.
+
+==================================
+First Level Node - CAM ISP device
+==================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-isp".
+
+- arch-compat
+  Usage: required
+  Value type: <string>
+  Definition: Should be "vfe" or "ife".
+
+Example:
+
+	qcom,cam-isp {
+		compatible = "qcom,cam-isp";
+		arch-compat = "ife";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
new file mode 100644
index 0000000..73e99b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-jpeg.txt
@@ -0,0 +1,186 @@
+* Qualcomm Technologies, Inc. MSM Camera JPEG
+
+The MSM camera JPEG devices are implemented in multiple device nodes.
+The root JPEG device node has properties defined to hint the driver
+about the number of Encoder and DMA nodes available during the
+probe sequence. Each node has multiple properties defined
+for interrupts, clocks and regulators.
+
+=======================
+Required Node Structure
+=======================
+The JPEG root interface node accounts for the number of Encoder and DMA
+devices present on the hardware.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-jpeg".
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,jpegenc" or "qcom,jpegdma".
+
+- num-jpeg-enc
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported Encoder HW blocks.
+
+- num-jpeg-dma
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported DMA HW blocks.
+
+Example:
+	qcom,cam-jpeg {
+		compatible = "qcom,cam-jpeg";
+		compat-hw-name = "qcom,jpegenc",
+			"qcom,jpegdma";
+		num-jpeg-enc = <1>;
+		num-jpeg-dma = <1>;
+		status = "ok";
+	};
+
+
+=======================
+Required Node Structure
+=======================
+The Encoder/DMA nodes describe to the JPEG driver
+the device register map, interrupt map, clocks and regulators.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam_jpeg_enc".
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources.
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values.
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space compared to
+		the Camera base register space.
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt associated with JPEG HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for JPEG HW.
+
+- camss-vdd-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+		in "regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for JPEG HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for JPEG HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to clock-rates levels.
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo.
+
+Examples:
+	cam_jpeg_enc: qcom,jpegenc@ac4e000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_enc";
+		reg-names = "jpege_hw";
+		reg = <0xac4e000 0x4000>;
+		reg-cam-base = <0x4e000>;
+		interrupt-names = "jpeg";
+		interrupts = <0 474 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegenc_clk_src",
+			"jpegenc_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegenc_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
+
+	cam_jpeg_dma: qcom,jpegdma@ac52000 {
+		cell-index = <0>;
+		compatible = "qcom,cam_jpeg_dma";
+		reg-names = "jpegdma_hw";
+		reg = <0xac52000 0x4000>;
+		reg-cam-base = <0x52000>;
+		interrupt-names = "jpegdma";
+		interrupts = <0 475 0>;
+		regulator-names = "camss-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"jpegdma_clk_src",
+			"jpegdma_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_JPEG_CLK_SRC>,
+			<&clock_camcc CAM_CC_JPEG_CLK>;
+
+		clock-rates = <0 0 0 0 0 600000000 0>;
+		src-clock-name = "jpegdma_clk_src";
+		clock-cntl-level = "nominal";
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
new file mode 100644
index 0000000..409be3f
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-lrme.txt
@@ -0,0 +1,148 @@
+* Qualcomm Technologies, Inc. MSM Camera LRME
+
+The MSM camera Low Resolution Motion Estimation device provides dependency
+definitions for enabling Camera LRME HW. MSM camera LRME is implemented in
+multiple device nodes. The root LRME device node has properties defined to
+hint the driver about the LRME HW nodes available during the probe sequence.
+Each node has multiple properties defined for interrupts, clocks and
+regulators.
+
+=======================
+Required Node Structure
+=======================
+The LRME root interface node handles the high-level LRME driver
+and controls the underlying LRME hardware present.
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,cam-lrme"
+
+- compat-hw-name
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,lrme"
+
+- num-lrme
+  Usage: required
+  Value type: <u32>
+  Definition: Number of supported LRME HW blocks
+
+Example:
+	qcom,cam-lrme {
+		compatible = "qcom,cam-lrme";
+		compat-hw-name = "qcom,lrme";
+		num-lrme = <1>;
+	};
+
+=======================
+Required Node Structure
+=======================
+The LRME node describes to the Low Resolution Motion Estimation hardware
+driver the device register map, interrupt map, clocks and regulators.
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Node instance number
+
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,lrme"
+
+- reg-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the register resources
+
+- reg
+  Usage: optional
+  Value type: <u32>
+  Definition: Register values
+
+- reg-cam-base
+  Usage: optional
+  Value type: <u32>
+  Definition: Offset of the register space compared to
+		the Camera base register space
+
+- interrupt-names
+  Usage: optional
+  Value type: <string>
+  Definition: Name of the interrupt
+
+- interrupts
+  Usage: optional
+  Value type: <u32>
+  Definition: Interrupt line associated with LRME HW
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for LRME HW
+
+- camss-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed
+		in "regulator-names"
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for LRME HW
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks required for LRME HW
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates
+
+- clock-cntl-level
+  Usage: required
+  Value type: <string>
+  Definition: List of strings corresponding to clock-rates levels
+  Supported strings: minsvs, lowsvs, svs, svs_l1, nominal, turbo
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name
+
+Examples:
+	cam_lrme: qcom,lrme@ac6b000 {
+		cell-index = <0>;
+		compatible = "qcom,lrme";
+		reg-names = "lrme";
+		reg = <0xac6b000 0xa00>;
+		reg-cam-base = <0x6b000>;
+		interrupt-names = "lrme";
+		interrupts = <0 476 0>;
+		regulator-names = "camss";
+		camss-supply = <&titan_top_gdsc>;
+		clock-names = "camera_ahb",
+			"camera_axi",
+			"soc_ahb_clk",
+			"cpas_ahb_clk",
+			"camnoc_axi_clk",
+			"lrme_clk_src",
+			"lrme_clk";
+		clocks = <&clock_gcc GCC_CAMERA_AHB_CLK>,
+			<&clock_gcc GCC_CAMERA_AXI_CLK>,
+			<&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_LRME_CLK_SRC>,
+			<&clock_camcc CAM_CC_LRME_CLK>;
+		clock-rates = <0 0 0 0 0 0 0>,
+			<0 0 0 0 0 19200000 19200000>,
+			<0 0 0 0 0 19200000 19200000>,
+			<0 0 0 0 0 19200000 19200000>;
+		clock-cntl-level = "lowsvs", "svs", "svs_l1", "turbo";
+		src-clock-name = "lrme_core_clk_src";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
new file mode 100644
index 0000000..eca2bd8
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-smmu.txt
@@ -0,0 +1,142 @@
+* Qualcomm Technologies, Inc. MSM Camera SMMU
+
+The MSM camera SMMU device provides SMMU context bank definitions
+for all HW blocks that need to map IOVA to physical memory. These
+definitions consist of various properties that define how the
+IOVA address space is laid out for each HW block in the camera
+subsystem.
+
+=======================
+Required Node Structure
+=======================
+The camera SMMU device must be described in three levels of device nodes. The
+first level describes the overall SMMU device. Within it, second level nodes
+describe individual context banks that map different stream ids. There can
+also be second level nodes describing firmware device nodes. Each HW block
+such as IFE, ICP maps into these second level device nodes. All context bank
+specific properties that define how the IOVA is laid out are contained within
+third level device nodes within the second level device nodes.
+
+During the kernel initialization all the devices are probed recursively and
+a device pointer is created for each context bank, keeping track of the IOVA
+mapping information.
+
+Duplicate regions of the same type are not allowed within the same
+context bank. All context banks must contain an IO region at the very least.
+
+==================================
+First Level Node - CAM SMMU device
+==================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,msm-cam-smmu".
+
+===================================================================
+Second Level Node - CAM SMMU context bank device or firmware device
+===================================================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,msm-cam-smmu-cb" or "qcom,msm-cam-smmu-fw-dev".
+
+- memory-region
+  Usage: optional
+  Value type: <phandle>
+  Definition: Should specify the phandle of the memory region for firmware
+		allocation.
+
+- iommus
+  Usage: required
+  Value type: <phandle u32 u32>
+  Definition: first cell is phandle of the iommu, second cell is stream id
+              and third cell is SMR mask.
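+
+For example, a single iommus entry with an explicit SMR mask might look as
+follows (hypothetical stream id and mask values):
+
+	iommus = <&apps_smmu 0x1078 0x0>;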
+
+- label
+  Usage: required
+  Value type: <string>
+  Definition: Should specify a string label to identify the context bank.
+
+- qcom,secure-cb
+  Usage: optional
+  Value type: boolean
+  Definition: Specifies if the context bank is a secure context bank.
+
+=============================================
+Third Level Node - CAM SMMU memory map device
+=============================================
+- iova-region-name
+  Usage: required
+  Value type: <string>
+  Definition: Should specify a string label to identify the IOVA region.
+
+- iova-region-start
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify start IOVA for region.
+
+- iova-region-len
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify length for IOVA region.
+
+- iova-region-id
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the numerical identifier for IOVA region.
+	Allowed values are: 0x00 to 0x03
+		- Firmware region: 0x00
+		- Shared region: 0x01
+		- Scratch region: 0x02
+		- IO region: 0x03
+
+- iova-granularity
+  Usage: optional
+  Value type: <u32>
+  Definition: Should specify IOVA granularity for shared memory region.
+
+Example:
+	qcom,cam_smmu@0 {
+		compatible = "qcom,msm-cam-smmu";
+
+		msm_cam_smmu_icp {
+			compatible = "qcom,msm-cam-smmu-cb";
+			iommus = <&apps_smmu 0x1078>,
+				<&apps_smmu 0x1020>,
+				<&apps_smmu 0x1028>,
+				<&apps_smmu 0x1040>,
+				<&apps_smmu 0x1048>,
+				<&apps_smmu 0x1030>,
+				<&apps_smmu 0x1050>;
+			label = "icp";
+			icp_iova_mem_map: iova-mem-map {
+				iova-mem-region-firmware {
+					/* Firmware region is 5MB */
+					iova-region-name = "firmware";
+					iova-region-start = <0x0>;
+					iova-region-len = <0x500000>;
+					iova-region-id = <0x0>;
+					status = "ok";
+				};
+
+				iova-mem-region-shared {
+					/* Shared region is 100MB long */
+					iova-region-name = "shared";
+					iova-region-start = <0x7400000>;
+					iova-region-len = <0x6400000>;
+					iova-region-id = <0x1>;
+					iova-granularity = <0x15>;
+					status = "ok";
+				};
+
+				iova-mem-region-io {
+					/* IO region is approximately 3.5 GB */
+					iova-region-name = "io";
+					iova-region-start = <0xd800000>;
+					iova-region-len = <0xd2800000>;
+					iova-region-id = <0x3>;
+					status = "ok";
+				};
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
new file mode 100644
index 0000000..e3264f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-vfe.txt
@@ -0,0 +1,135 @@
+* Qualcomm Technologies, Inc. MSM Camera VFE
+
+Camera VFE device provides the definitions for enabling
+the VFE hardware. It also provides the functions for the client
+to control the VFE hardware.
+
+=======================
+Required Node Structure
+=======================
+The VFE device is described in one level of the device node.
+
+======================================
+First Level Node - CAM VFE device
+======================================
+Required properties:
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should specify the compatibility string for matching the
+	driver. e.g. "qcom,vfe480", "qcom,vfe175", "qcom,vfe170",
+	"qcom,vfe-lite480", "qcom,vfe-lite175", "qcom,vfe-lite170".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg-names
+  Usage: required
+  Value type: <string>
+  Definition: Should specify the name of the register block.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- interrupt-names
+  Usage: Required
+  Value type: <string>
+  Definition: Name of the interrupt.
+
+- interrupts
+  Usage: Required
+  Value type: <u32>
+  Definition: Interrupt associated with VFE HW.
+
+- regulator-names
+  Usage: required
+  Value type: <string>
+  Definition: Name of the regulator resources for VFE HW.
+
+- xxxx-supply
+  Usage: required
+  Value type: <phandle>
+  Definition: Regulator reference corresponding to the names listed in
+	"regulator-names".
+
+- clock-names
+  Usage: required
+  Value type: <string>
+  Definition: List of clock names required for VFE HW.
+
+- clocks
+  Usage: required
+  Value type: <phandle>
+  Definition: List of clocks used for VFE HW.
+
+- clock-rates
+  Usage: required
+  Value type: <u32>
+  Definition: List of clocks rates.
+
+- src-clock-name
+  Usage: required
+  Value type: <string>
+  Definition: Source clock name.
+
+Optional properties:
+- clock-names-option
+  Usage: optional
+  Value type: <string>
+  Definition: Optional clock names.
+
+- clocks-option
+  Usage: required if clock-names-option defined
+  Value type: <phandle>
+  Definition: List of optional clocks used for VFE HW.
+
+- clock-rates-option
+  Usage: required if clock-names-option defined
+  Value type: <u32>
+  Definition: List of clocks rates for optional clocks.
+
+- clock-control-debugfs
+  Usage: optional
+  Value type: <string>
+  Definition: Enable/Disable clk rate control.
+
+Example:
+	qcom,vfe0@acaf000 {
+		cell-index = <0>;
+		compatible = "qcom,vfe480";
+		reg-names = "ife";
+		reg = <0xacaf000 0x4000>;
+		interrupts = <0 465 0>;
+		interrupt-names = "ife";
+		vdd-names = "camss-vdd", "ife0-vdd";
+		camss-vdd-supply = <&titan_top_gdsc>;
+		ife0-vdd-supply = <&ife_0_gdsc>;
+		clock-names = "soc_ahb_clk",
+			"cpas_ahb_clk",
+			"slow_ahb_clk_src",
+			"ife_clk",
+			"ife_clk_src",
+			"ife_csid_clk",
+			"ife_csid_clk_src",
+			"camnoc_axi_clk",
+			"ife_axi_clk",
+		clocks = <&clock_camcc CAM_CC_SOC_AHB_CLK>,
+			<&clock_camcc CAM_CC_CPAS_AHB_CLK>,
+			<&clock_camcc CAM_CC_SLOW_AHB_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CLK_SRC>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_CSID_CLK_SRC>,
+			<&clock_camcc CAM_CC_CAMNOC_AXI_CLK>,
+			<&clock_camcc CAM_CC_IFE_0_AXI_CLK>,
+		clock-rates = <0 0 80000000 0 320000000 0 384000000 0 0 0>;
+		src-clock-name = "ife_clk_src";
+		clock-names-option = "ife_dsp_clk";
+		clocks-option = <&clock_camcc CAM_CC_IFE_0_DSP_CLK>;
+		clock-rates-option = <600000000>;
+		status = "ok";
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
new file mode 100644
index 0000000..ab81329
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-camera-flash.txt
@@ -0,0 +1,132 @@
+* Qualcomm Technologies, Inc. MSM FLASH
+
+The MSM camera Flash driver provides the definitions for
+enabling and disabling LED Torch/Flash by requesting it to
+PMIC/I2C/GPIO based hardware. It provides the functions for
+the Client to control the Flash hardware.
+
+=======================================================
+Required Node Structure
+=======================================================
+The Flash device is described in one level of the device node.
+
+======================================
+First Level Node - CAM FLASH device
+======================================
+- compatible
+  Usage: required
+  Value type: <string>
+  Definition: Should be "qcom,camera-flash".
+
+- cell-index
+  Usage: required
+  Value type: <u32>
+  Definition: Should specify the hardware index id.
+
+- reg
+  Usage: required
+  Value type: <u32>
+  Definition: Register values.
+
+- flash-source
+  Usage: required
+  Value type: <phandle>
+  Definition: Should contain array of phandles to Flash source nodes.
+
+- torch-source
+  Usage: required
+  Value type: <phandle>
+  Definition: Should contain array of phandles to torch source nodes.
+
+- switch-source
+  Usage: Optional
+  Value type: <phandle>
+  Definition: Should contain phandle to switch source nodes.
+
+- slave-id
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain i2c slave address, device id address
+		and expected id read value.
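+
+For example, the three cells appear in the order defined above (values are
+illustrative only):
+
+	qcom,slave-id = <0x00 0x00 0x0011>;	/* slave addr, id reg addr, expected id */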
+
+- cci-master
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain i2c master id to be used for this camera
+		flash.
+
+- max-current
+  Usage: optional
+  Value type: <u32>
+  Definition: Max current in mA supported by the flash.
+
+- max-duration
+  Usage: optional
+  Value type: <u32>
+  Definition: Max duration in ms flash can glow.
+
+- wled-flash-support
+  Usage: optional
+  Value type: <boolean>
+  Definition: To identify wled flash hardware support.
+
+- gpios
+  Usage: optional
+  Value type: <u32>
+  Definition: should specify the gpios to be used for the flash.
+
+- gpio-req-tbl-num
+  Usage: optional
+  Value type: <u32>
+  Definition: should specify the gpio table index.
+
+- gpio-req-tbl-flags
+  Usage: optional
+  Value type: <u32>
+  Definition: should specify the gpio functions.
+
+- gpio-req-tbl-label
+  Usage: optional
+  Value type: <u32>
+  Definition: should specify the gpio labels.
+
+- gpio-flash-reset
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by flash's "flash reset" pin.
+
+- gpio-flash-en
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by flash's "flash enable" pin.
+
+- gpio-flash-now
+  Usage: optional
+  Value type: <u32>
+  Definition: should contain index to gpio used by flash's "flash now" pin.
+
+Example:
+
+led_flash_rear: qcom,camera-flash@0 {
+		reg = <0x00 0x00>;
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pmi8998_flash0 &pmi8998_flash1>;
+		torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+		switch-source = <&pmi8998_switch0>;
+		wled-flash-support;
+		qcom,slave-id = <0x00 0x00 0x0011>;
+		qcom,cci-master = <0>;
+		gpios = <&msmgpio 23 0>,
+			<&msmgpio 24 0>;
+			<&msmgpio 25 0>;
+		qcom,gpio-flash-reset = <0>;
+		qcom,gpio-flash-en = <0>;
+		qcom,gpio-flash-now = <1>;
+		qcom,gpio-req-tbl-num = <0 1>;
+		qcom,gpio-req-tbl-flags = <0 0>;
+		qcom,gpio-req-tbl-label = "FLASH_EN",
+			"FLASH_NOW";
+		qcom,max-current = <1500>;
+		qcom,max-duration = <1200>;
+	};
diff --git a/Documentation/devicetree/bindings/media/video/msm-camera.txt b/Documentation/devicetree/bindings/media/video/msm-camera.txt
new file mode 100644
index 0000000..04548ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-camera.txt
@@ -0,0 +1,13 @@
+* Qualcomm Technologies, Inc. MSM Camera
+
+Required properties:
+- compatible :
+    - "qcom,cam-req-mgr"
+- qcom,sensor-manual-probe : specify whether the sensor probe happens at kernel boot time or is user driven
+
+Example:
+
+   qcom,cam-req-mgr {
+       compatible = "qcom,cam-req-mgr";
+       qcom,sensor-manual-probe;
+   };
diff --git a/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
new file mode 100644
index 0000000..ab2bbe4
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/qpnp-pdphy.txt
@@ -0,0 +1,74 @@
+Qualcomm Technologies, Inc. QPNP PD PHY - USB Power Delivery Physical layer
+
+Required properties:
+- compatible:		Must be "qcom,qpnp-pdphy"
+- reg:			The base address for this peripheral
+- vdd-pdphy-supply:	phandle to the VDD supply regulator node
+- interrupts:		Specifies the interrupt associated with the peripheral.
+- interrupt-names:	Specifies the interrupt names for the peripheral. Every
+			available interrupt needs to have an associated name
+			with it to identify its purpose.
+
+			The following interrupts are required:
+
+			0: sig-tx
+				Triggers when a signal (HardReset or CableReset)
+				has been sent.
+			1: sig-rx
+				Triggers when a signal has been received.
+			2: msg-tx
+				Triggers when a message has been sent and the
+				related GoodCRC has been received.
+			3: msg-rx
+				Triggers when a message has been received and
+				the related GoodCRC was sent successfully.
+			4: msg-tx-failed
+				Triggers when a message failed all its
+				transmission attempts, either due to a non-idle
+				bus or missing GoodCRC reply.
+			5: msg-tx-discarded
+				Triggers when a message is received while a
+				transmission request was in place. The request
+				itself is discarded.
+			6: msg-rx-discarded
+				Triggers when a message was received but had to
+				be discarded due to the RX buffer still in use
+				by SW.
+
+Optional properties:
+- vbus-supply:		Regulator that enables VBUS source output
+- vconn-supply:		Regulator that enables VCONN source output. This will
+			be supplied on the USB CC line that is not used for
+			communication when Ra resistance is detected.
+- qcom,vconn-uses-external-source: Indicates whether VCONN supply is sourced
+			from an external regulator. If omitted, then it is
+			assumed it is connected to VBUS.
+- qcom,default-sink-caps: List of 32-bit values representing the nominal sink
+			capabilities in voltage (millivolts) and current
+			(milliamps) pairs.
+
+Example:
+	qcom,qpnp-pdphy@1700 {
+		compatible = "qcom,qpnp-pdphy";
+		reg = <0x1700 0x100>;
+		vdd-pdphy-supply = <&pm8998_l24>;
+		interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x2 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x3 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x4 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x5 IRQ_TYPE_EDGE_RISING>,
+			     <0x2 0x17 0x6 IRQ_TYPE_EDGE_RISING>;
+
+		interrupt-names = "sig-tx",
+				  "sig-rx",
+				  "msg-tx",
+				  "msg-rx",
+				  "msg-tx-failed",
+				  "msg-tx-discarded",
+				  "msg-rx-discarded";
+
+		qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+					 <9000 3000>, /* 9V @ 3A */
+					 <12000 2250>; /* 12V @ 2.25A */
+	};
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f234dbe..eaaab03 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -883,6 +883,11 @@
 
 	Default: 1024
 
+reserved_port_bind - BOOLEAN
+	If set, allows applications to explicitly bind() to ports within
+	the range of ip_local_reserved_ports.
+	Default: 1
+
 ip_nonlocal_bind - BOOLEAN
 	If set, allows processes to bind() to non-local IP addresses,
 	which can be quite useful - but may break some applications.
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 0978282..2a26e31 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -498,6 +498,18 @@
 	__smp_cross_call(target, ipinr);
 }
 
+DEFINE_PER_CPU(bool, pending_ipi);
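+
+/*
+ * Raise an IPI on each CPU in @cpumask, first marking it as pending for
+ * that CPU; the flag is cleared again in handle_IPI() once the IPI has
+ * been serviced.
+ */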
+static void smp_cross_call_common(const struct cpumask *cpumask,
+						unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	smp_cross_call(cpumask, func);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -526,31 +538,32 @@
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_CALL_FUNC);
+	smp_cross_call_common(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_WAKEUP);
+	smp_cross_call_common(mask, IPI_WAKEUP);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
 	if (arch_irq_work_has_interrupt())
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+		smp_cross_call_common(cpumask_of(smp_processor_id()),
+				IPI_IRQ_WORK);
 }
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_TIMER);
+	smp_cross_call_common(mask, IPI_TIMER);
 }
 #endif
 
@@ -666,12 +679,14 @@
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
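+	/* The IPI raised for this CPU has now been handled. */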
+	per_cpu(pending_ipi, cpu) = false;
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -682,7 +697,7 @@
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
 	if (!cpumask_empty(&mask))
-		smp_cross_call(&mask, IPI_CPU_STOP);
+		smp_cross_call_common(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
@@ -755,7 +770,16 @@
 
 static void raise_nmi(cpumask_t *mask)
 {
-	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	/*
+	 * Generate the backtrace directly if we are running in a calling
+	 * context that is not preemptible by the backtrace IPI. Note
+	 * that nmi_cpu_backtrace() automatically removes the current cpu
+	 * from mask.
+	 */
+	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
+		nmi_cpu_backtrace(NULL);
+
+	smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
 }
 
 void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-ext-bridge-1080p.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-ext-bridge-1080p.dtsi
new file mode 100644
index 0000000..3afe49b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-ext-bridge-1080p.dtsi
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_ext_bridge_1080p: qcom,mdss_dsi_ext_bridge_1080p {
+		qcom,mdss-dsi-panel-name = "ext video mode dsi bridge";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_pulse";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-t-clk-post = <0x03>;
+		qcom,mdss-dsi-t-clk-pre = <0x24>;
+		qcom,mdss-dsi-force-clock-lane-hs;
+		qcom,mdss-dsi-ext-bridge-mode;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1920>;
+				qcom,mdss-dsi-panel-height = <1080>;
+				qcom,mdss-dsi-h-front-porch = <88>;
+				qcom,mdss-dsi-h-back-porch = <148>;
+				qcom,mdss-dsi-h-pulse-width = <44>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <36>;
+				qcom,mdss-dsi-v-front-porch = <4>;
+				qcom,mdss-dsi-v-pulse-width = <5>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,display-topology = <1 0 1>;
+				qcom,default-topology-index = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-hx83112a-truly-singlemipi-fhd-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-hx83112a-truly-singlemipi-fhd-video.dtsi
new file mode 100644
index 0000000..0363dd4
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-hx83112a-truly-singlemipi-fhd-video.dtsi
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_hx83112a_truly_video: qcom,mdss_dsi_hx83112a_truly_video {
+		qcom,mdss-dsi-panel-name =
+			"hx83112a video mode dsi truly panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <65>;
+		qcom,mdss-pan-physical-height-dimension = <129>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <2160>;
+				qcom,mdss-dsi-h-front-porch = <42>;
+				qcom,mdss-dsi-h-back-porch = <42>;
+				qcom,mdss-dsi-h-pulse-width = <10>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <15>;
+				qcom,mdss-dsi-v-front-porch = <10>;
+				qcom,mdss-dsi-v-pulse-width = <3>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+				  39 01 00 00 00 00 04 B9 83 11 2A
+				  39 01 00 00 00 00 09 B1 08 29 29 00 00 4F 54
+				     33
+				  39 01 00 00 00 00 11 B2 00 02 00 80 70 00 08
+				     26 FC 01 00 03 15 A3 87 09
+				  39 01 00 00 00 00 02 BD 02
+				  39 01 00 00 00 00 02 BD 00
+				  39 01 00 00 00 00 03 D2 2C 2C
+				  39 01 00 00 00 00 1C B4 01 CE 01 CE 01 CE 0A
+				     CE 0A CE 0A CE 00 FF 00 FF 00 00 22 23 00
+				     28 0A 13 14 00 8A
+				  39 01 00 00 00 00 02 BD 02
+				  39 01 00 00 00 00 0A B4 00 92 12 22 88 12 12
+				     00 53
+				  39 01 00 00 00 00 02 BD 00
+				  39 01 00 00 00 00 04 B6 82 82 E3
+				  39 01 00 00 00 00 02 CC 08
+				  39 01 00 00 00 00 2B D3 40 00 00 00 00 01 01
+				     0A 0A 07 07 00 08 09 09 09 09 32 10 09 00
+				     09 32 21 0A 00 0A 32 10 08 00 00 00 00 00
+				     00 00 00 00 0B 08 82
+				  39 01 00 00 00 00 02 BD 01
+				  39 01 00 00 00 00 09 D3 00 00 19 00 00 0A 00
+				     81
+				  39 01 00 00 00 00 02 BD 00
+				  39 01 00 00 00 00 31 D5 18 18 18 18 18 18 18
+				     18 31 31 30 30 2F 2F 31 31 30 30 2F 2F C0
+				     18 40 40 01 00 07 06 05 04 03 02 21 20 18
+				     18 19 19 18 18 03 03 18 18 18 18 18 18
+				  39 01 00 00 00 00 31 D6 18 18 18 18 18 18 18
+				     18 31 31 30 30 2F 2F 31 31 30 30 2F 2F C0
+				     18 40 40 02 03 04 05 06 07 00 01 20 21 18
+				     18 18 18 19 19 20 20 18 18 18 18 18 18
+				  39 01 00 00 00 00 19 D8 00 00 00 00 00 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00 00 00
+				     00 00 00
+				  39 01 00 00 00 00 02 BD 01
+				  39 01 00 00 00 00 19 D8 AA AA AA AA AA AA AA
+				     AA AA AA AA AA AA AA AA AA AA AA AA AA AA
+				     AA AA AA
+				  39 01 00 00 00 00 02 BD 02
+				  39 01 00 00 00 00 0D D8 AF FF FA AA BA AA AA
+				     FF FA AA BA AA
+				  39 01 00 00 00 00 02 BD 03
+				  39 01 00 00 00 00 19 D8 AA AA AA AA AA AA AA
+				     AA AA AA AA AA AA AA AA AA AA AA AA AA AA
+				     AA AA AA
+				  39 01 00 00 00 00 02 BD 00
+				  39 01 00 00 00 00 18 E7 0E 0E 1E 6A 1D 6A 00
+				     32 02 02 00 00 02 02 02 05 14 14 32 B9 23
+				     B9 08
+				  39 01 00 00 00 00 02 BD 01
+				  39 01 00 00 00 00 0A E7 02 00 98 01 9A 0D A8
+				     0E 01
+				  39 01 00 00 00 00 02 BD 02
+				  39 01 00 00 00 00 1E E7 00 00 08 00 01 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00 00 00
+				     00 04 00 00 00 00 02 00
+				  39 01 00 00 00 00 02 BD 00
+				  39 01 00 00 00 00 02 C1 01
+				  39 01 00 00 00 00 02 BD 01
+				  39 01 00 00 00 00 3A C1 FF F7 F0 E9 E2 DB D4
+				     C6 BF B8 B1 AB A5 9F 99 94 8E 8A 85 7C 74
+				     6C 65 5F 58 52 4B 47 42 3C 37 31 2C 27 22
+				     1C 18 12 0D 08 05 04 02 01 00 27 B9 BE 54
+				     C6 B8 9C 37 43 3D E5 00
+				  39 01 00 00 00 00 02 BD 02
+				  39 01 00 00 00 00 3A C1 FF F7 F0 E9 E2 DB D4
+				     C6 BF B8 B1 AB A5 9F 99 94 8E 8A 85 7C 74
+				     6C 65 5F 58 52 4B 47 42 3C 37 31 2C 27 22
+				     1C 18 12 0D 08 05 04 02 01 00 27 B9 BE 54
+				     C6 B8 9C 37 43 3D E5 00
+				  39 01 00 00 00 00 02 BD 03
+				  39 01 00 00 00 00 3A C1 FF F7 F0 E9 E2 DB D4
+				     C6 BF B8 B1 AB A5 9F 99 94 8E 8A 85 7C 74
+				     6C 65 5F 58 52 4B 47 42 3C 37 31 2C 27 22
+				     1C 18 12 0D 08 05 04 02 01 00 27 B9 BE 54
+				     C6 B8 9C 37 43 3D E5 00
+				  39 01 00 00 00 00 02 BD 00
+				  39 01 00 00 00 00 02 E9 C3
+				  39 01 00 00 00 00 03 CB 92 01
+				  39 01 00 00 00 00 02 E9 3F
+				  39 01 00 00 00 00 07 C7 70 00 04 E0 33 00
+				  39 01 00 00 00 00 03 51 0F FF
+				  39 01 00 00 00 00 02 53 24
+				  39 01 00 00 00 00 02 55 00
+				  15 01 00 00 00 00 02 35 00
+				  05 01 00 00 96 00 02 11 00
+				  05 01 00 00 32 00 02 29 00];
+				qcom,mdss-dsi-off-command = [
+				  05 01 00 00 32 00 02 28 00
+				  05 01 00 00 96 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-hx8394d-720p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-hx8394d-720p-video.dtsi
new file mode 100644
index 0000000..dda33de
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-hx8394d-720p-video.dtsi
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_hx8394d_720_vid: qcom,mdss_dsi_hx8394d_720p_video {
+		qcom,mdss-dsi-panel-name = "hx8394d 720p video mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <1280>;
+		qcom,mdss-dsi-h-front-porch = <52>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <24>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <20>;
+		qcom,mdss-dsi-v-front-porch = <8>;
+		qcom,mdss-dsi-v-pulse-width = <4>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-on-command = [
+			39 01 00 00 00 00 04 b9 ff 83 94
+			39 01 00 00 00 00 03 ba 33 83
+			39 01 00 00 00 00 10 b1 6c 12 12
+				37 04 11 f1 80 ec 94 23 80 c0
+				d2 18
+			39 01 00 00 00 00 0c b2 00 64 0e
+				0d 32 23 08 08 1c 4d 00
+			39 01 00 00 00 00 0d b4 00 ff 03
+				50 03 50 03 50 01 6a 01 6a
+			39 01 00 00 00 00 02 bc 07
+			39 01 00 00 00 00 04 bf 41 0e 01
+			39 01 00 00 00 00 1f d3 00 07 00
+				00 00 10 00 32 10 05 00 00 32
+				10 00 00 00 32 10 00 00 00 36
+				03 09 09 37 00 00 37
+			39 01 00 00 00 00 2d d5 02 03 00
+				01 06 07 04 05 20 21 22 23 18
+				18 18 18 18 18 18 18 18 18 18
+				18 18 18 18 18 18 18 18 18 18
+				18 18 18 18 18 24 25 18 18 19
+				19
+			39 01 00 00 00 00 2d d6 05 04 07
+				06 01 00 03 02 23 22 21 20 18
+				18 18 18 18 18 58 58 18 18 18
+				18 18 18 18 18 18 18 18 18 18
+				18 18 18 18 18 25 24 19 19 18
+				18
+			39 01 00 00 00 00 02 cc 09
+			39 01 00 00 00 00 03 c0 30 14
+			39 01 00 00 00 00 05 c7 00 c0 40 c0
+			39 01 00 00 00 00 03 b6 43 43
+			05 01 00 00 c8 00 02 11 00
+			05 01 00 00 0a 00 02 29 00
+			];
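+		/* DCS display_off (0x28), then enter_sleep (0x10) */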
+		qcom,mdss-dsi-off-command = [05 01 00 00 00 00 02 28 00
+				05 01 00 00 00 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+		qcom,mdss-dsi-h-sync-pulse = <1>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
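+		/* 12-byte DSI PHY lane timing block (board-tuned values) */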
+		qcom,mdss-dsi-panel-timings = [
+			79 1a 12 00 3e 42
+			16 1e 15 03 04 00
+			];
+		qcom,mdss-dsi-t-clk-post = <0x04>;
+		qcom,mdss-dsi-t-clk-pre = <0x1b>;
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 20>;
+		qcom,mdss-pan-physical-width-dimension = <59>;
+		qcom,mdss-pan-physical-height-dimension = <104>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
new file mode 100644
index 0000000..06e482b
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-cmd.dtsi
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_nt35597_truly_dsc_cmd: qcom,mdss_dsi_nt35597_dsc_cmd_truly {
+		qcom,mdss-dsi-panel-name =
+			"nt35597 cmd mode dsi truly panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <1440>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <32>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <8>;
+				qcom,mdss-dsi-v-front-porch = <10>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
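+				/*
+				 * "ff <page>" entries below select Novatek
+				 * register pages (CMD2 P0/P4, back to CMD1);
+				 * "fb 01" appears to gate register reload,
+				 * per the "Non Reload MTP" label.
+				 */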
+				qcom,mdss-dsi-on-command = [
+					/* CMD2_P0 */
+					15 01 00 00 00 00 02 ff 20
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 05 40
+					15 01 00 00 00 00 02 06 19
+					15 01 00 00 00 00 02 07 1e
+					15 01 00 00 00 00 02 0b 73
+					15 01 00 00 00 00 02 0c 73
+					15 01 00 00 00 00 02 0e b0
+					15 01 00 00 00 00 02 0f ae
+					15 01 00 00 00 00 02 11 b8
+					15 01 00 00 00 00 02 13 00
+					15 01 00 00 00 00 02 58 80
+					15 01 00 00 00 00 02 59 01
+					15 01 00 00 00 00 02 5a 00
+					15 01 00 00 00 00 02 5b 01
+					15 01 00 00 00 00 02 5c 80
+					15 01 00 00 00 00 02 5d 81
+					15 01 00 00 00 00 02 5e 00
+					15 01 00 00 00 00 02 5f 01
+					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 68 03
+					/* CMD2_P4 */
+					15 01 00 00 00 00 02 ff 24
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 1c
+					15 01 00 00 00 00 02 01 0b
+					15 01 00 00 00 00 02 02 0c
+					15 01 00 00 00 00 02 03 01
+					15 01 00 00 00 00 02 04 0f
+					15 01 00 00 00 00 02 05 10
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 89
+					15 01 00 00 00 00 02 09 8a
+					15 01 00 00 00 00 02 0a 13
+					15 01 00 00 00 00 02 0b 13
+					15 01 00 00 00 00 02 0c 15
+					15 01 00 00 00 00 02 0d 15
+					15 01 00 00 00 00 02 0e 17
+					15 01 00 00 00 00 02 0f 17
+					15 01 00 00 00 00 02 10 1c
+					15 01 00 00 00 00 02 11 0b
+					15 01 00 00 00 00 02 12 0c
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 0f
+					15 01 00 00 00 00 02 15 10
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 89
+					15 01 00 00 00 00 02 19 8a
+					15 01 00 00 00 00 02 1a 13
+					15 01 00 00 00 00 02 1b 13
+					15 01 00 00 00 00 02 1c 15
+					15 01 00 00 00 00 02 1d 15
+					15 01 00 00 00 00 02 1e 17
+					15 01 00 00 00 00 02 1f 17
+					/* STV */
+					15 01 00 00 00 00 02 20 40
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6d
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					/* Vend */
+					15 01 00 00 00 00 02 e0 00
+					15 01 00 00 00 00 02 dc 21
+					15 01 00 00 00 00 02 dd 22
+					15 01 00 00 00 00 02 de 07
+					15 01 00 00 00 00 02 df 07
+					15 01 00 00 00 00 02 e3 6d
+					15 01 00 00 00 00 02 e1 07
+					15 01 00 00 00 00 02 e2 07
+					/* UD */
+					15 01 00 00 00 00 02 29 d8
+					15 01 00 00 00 00 02 2a 2a
+					/* CLK */
+					15 01 00 00 00 00 02 4b 03
+					15 01 00 00 00 00 02 4c 11
+					15 01 00 00 00 00 02 4d 10
+					15 01 00 00 00 00 02 4e 01
+					15 01 00 00 00 00 02 4f 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 55 25
+					/* Reset XDONB */
+					15 01 00 00 00 00 02 5b 43
+					15 01 00 00 00 00 02 5c 00
+					15 01 00 00 00 00 02 5f 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					/* Resolution: 1440x2560 */
+					15 01 00 00 00 00 02 72 02
+					/* mux */
+					15 01 00 00 00 00 02 7a 80
+					15 01 00 00 00 00 02 7b 91
+					15 01 00 00 00 00 02 7c d8
+					15 01 00 00 00 00 02 7d 60
+					15 01 00 00 00 00 02 7f 15
+					15 01 00 00 00 00 02 75 15
+					/* ABOFF */
+					15 01 00 00 00 00 02 b3 c0
+					15 01 00 00 00 00 02 b4 00
+					15 01 00 00 00 00 02 b5 00
+					/* Source EQ */
+					15 01 00 00 00 00 02 78 00
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 80 00
+					15 01 00 00 00 00 02 83 00
+					/* FP BP */
+					15 01 00 00 00 00 02 93 0a
+					15 01 00 00 00 00 02 94 0a
+					/* Inversion Type */
+					15 01 00 00 00 00 02 8a 00
+					15 01 00 00 00 00 02 9b ff
+					/* IMGSWAP = 1 when PortSwap = 1 */
+					15 01 00 00 00 00 02 9d b0
+					15 01 00 00 00 00 02 9f 63
+					15 01 00 00 00 00 02 98 10
+					/* FRM */
+					15 01 00 00 00 00 02 ec 00
+					/* CMD1 */
+					15 01 00 00 00 00 02 ff 10
+					/* VESA DSC PPS settings
+					 * (1440x2560 slice 16H)
+					 */
+					39 01 00 00 00 00 11 c1 09
+					20 00 10 02 00 02 68 01 bb
+					00 0a 06 67 04 c5
+
+					39 01 00 00 00 00 03 c2 10 f0
+					/* C0h = 0x00 (2 Port SDC);
+					 * 0x01 (1 PortA FBC);
+					 * 0x02 (MTK); 0x03 (1 PortA VESA)
+					 */
+					15 01 00 00 00 00 02 c0 03
+					/* VBP+VSA = 0x0a, VFP = 0x0a */
+					39 01 00 00 00 00 04 3b 03 0a 0a
+					/* FTE on */
+					15 01 00 00 00 00 02 35 00
+					/* EN_BK =1(auto black) */
+					15 01 00 00 00 00 02 e5 01
+					/* CMD mode(10) VDO mode(03) */
+					15 01 00 00 00 00 02 bb 10
+					/* Non Reload MTP */
+					15 01 00 00 00 00 02 fb 01
+					/* SlpOut + DispOn */
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00
+					];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+
+				qcom,mdss-dsi-on-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
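+				/*
+				 * DSC: 1440 px line / 720 px slice = 2
+				 * slices per line, 2 slices per packet;
+				 * 8 bpc source at 8 bpp target is 3:1
+				 * compression of the 24 bpp stream.
+				 */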
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <16>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <2>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
new file mode 100644
index 0000000..f7de699
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dsc-wqxga-video.dtsi
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_nt35597_truly_dsc_video: qcom,mdss_dsi_nt35597_dsc_video_truly {
+		qcom,mdss-dsi-panel-name =
+			"nt35597 video mode dsi truly panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1440>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <32>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <8>;
+				qcom,mdss-dsi-v-front-porch = <10>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					/* CMD2_P0 */
+					15 01 00 00 00 00 02 ff 20
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 05 40
+					15 01 00 00 00 00 02 06 19
+					15 01 00 00 00 00 02 07 1e
+					15 01 00 00 00 00 02 0b 73
+					15 01 00 00 00 00 02 0c 73
+					15 01 00 00 00 00 02 0e b0
+					15 01 00 00 00 00 02 0f ae
+					15 01 00 00 00 00 02 11 b8
+					15 01 00 00 00 00 02 13 00
+					15 01 00 00 00 00 02 58 80
+					15 01 00 00 00 00 02 59 01
+					15 01 00 00 00 00 02 5a 00
+					15 01 00 00 00 00 02 5b 01
+					15 01 00 00 00 00 02 5c 80
+					15 01 00 00 00 00 02 5d 81
+					15 01 00 00 00 00 02 5e 00
+					15 01 00 00 00 00 02 5f 01
+					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 68 03
+					/* CMD2_P4 */
+					15 01 00 00 00 00 02 ff 24
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 1c
+					15 01 00 00 00 00 02 01 0b
+					15 01 00 00 00 00 02 02 0c
+					15 01 00 00 00 00 02 03 01
+					15 01 00 00 00 00 02 04 0f
+					15 01 00 00 00 00 02 05 10
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 89
+					15 01 00 00 00 00 02 09 8a
+					15 01 00 00 00 00 02 0a 13
+					15 01 00 00 00 00 02 0b 13
+					15 01 00 00 00 00 02 0c 15
+					15 01 00 00 00 00 02 0d 15
+					15 01 00 00 00 00 02 0e 17
+					15 01 00 00 00 00 02 0f 17
+					15 01 00 00 00 00 02 10 1c
+					15 01 00 00 00 00 02 11 0b
+					15 01 00 00 00 00 02 12 0c
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 0f
+					15 01 00 00 00 00 02 15 10
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 89
+					15 01 00 00 00 00 02 19 8a
+					15 01 00 00 00 00 02 1a 13
+					15 01 00 00 00 00 02 1b 13
+					15 01 00 00 00 00 02 1c 15
+					15 01 00 00 00 00 02 1d 15
+					15 01 00 00 00 00 02 1e 17
+					15 01 00 00 00 00 02 1f 17
+					/* STV */
+					15 01 00 00 00 00 02 20 40
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6d
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					/* Vend */
+					15 01 00 00 00 00 02 e0 00
+					15 01 00 00 00 00 02 dc 21
+					15 01 00 00 00 00 02 dd 22
+					15 01 00 00 00 00 02 de 07
+					15 01 00 00 00 00 02 df 07
+					15 01 00 00 00 00 02 e3 6d
+					15 01 00 00 00 00 02 e1 07
+					15 01 00 00 00 00 02 e2 07
+					/* UD */
+					15 01 00 00 00 00 02 29 d8
+					15 01 00 00 00 00 02 2a 2a
+					/* CLK */
+					15 01 00 00 00 00 02 4b 03
+					15 01 00 00 00 00 02 4c 11
+					15 01 00 00 00 00 02 4d 10
+					15 01 00 00 00 00 02 4e 01
+					15 01 00 00 00 00 02 4f 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 55 25
+					/* Reset XDONB */
+					15 01 00 00 00 00 02 5b 43
+					15 01 00 00 00 00 02 5c 00
+					15 01 00 00 00 00 02 5f 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					/* Resolution: 1440x2560 */
+					15 01 00 00 00 00 02 72 02
+					/* mux */
+					15 01 00 00 00 00 02 7a 80
+					15 01 00 00 00 00 02 7b 91
+					15 01 00 00 00 00 02 7c d8
+					15 01 00 00 00 00 02 7d 60
+					15 01 00 00 00 00 02 7f 15
+					15 01 00 00 00 00 02 75 15
+					/* ABOFF */
+					15 01 00 00 00 00 02 b3 c0
+					15 01 00 00 00 00 02 b4 00
+					15 01 00 00 00 00 02 b5 00
+					/* Source EQ */
+					15 01 00 00 00 00 02 78 00
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 80 00
+					15 01 00 00 00 00 02 83 00
+					/* FP BP */
+					15 01 00 00 00 00 02 93 0a
+					15 01 00 00 00 00 02 94 0a
+					/* Inversion Type */
+					15 01 00 00 00 00 02 8a 00
+					15 01 00 00 00 00 02 9b ff
+					/* IMGSWAP = 1 when PortSwap = 1 */
+					15 01 00 00 00 00 02 9d b0
+					15 01 00 00 00 00 02 9f 63
+					15 01 00 00 00 00 02 98 10
+					/* FRM */
+					15 01 00 00 00 00 02 ec 00
+					/* CMD1 */
+					15 01 00 00 00 00 02 ff 10
+					/* VESA DSC PPS settings
+					 * (1440x2560 slice 16H)
+					 */
+					39 01 00 00 00 00 11 c1 09
+					20 00 10 02 00 02 68 01 bb
+					00 0a 06 67 04 c5
+
+					39 01 00 00 00 00 03 c2 10 f0
+					/* C0h = 0x00 (2 Port SDC);
+					 * 0x01 (1 PortA FBC);
+					 * 0x02 (MTK); 0x03 (1 PortA VESA)
+					 */
+					15 01 00 00 00 00 02 c0 03
+					/* VBP+VSA = 0x0a, VFP = 0x0a */
+					39 01 00 00 00 00 04 3b 03 0a 0a
+					/* FTE on */
+					15 01 00 00 00 00 02 35 00
+					/* EN_BK =1(auto black) */
+					15 01 00 00 00 00 02 e5 01
+					/* CMD mode(10) VDO mode(03) */
+					15 01 00 00 00 00 02 bb 03
+					/* Non Reload MTP */
+					15 01 00 00 00 00 02 fb 01
+					/* SlpOut + DispOn */
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00
+					];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
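+				/* same 3:1 DSC layout as the cmd-mode variant */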
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <16>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <2>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
new file mode 100644
index 0000000..f6d658c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-cmd.dtsi
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt35597_truly_cmd: qcom,mdss_dsi_nt35597_truly_wqxga_cmd {
+		qcom,mdss-dsi-panel-name =
+			"Dual nt35597 cmd mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
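+		/* 0x2c/0x3c: DCS write_memory_start/_continue */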
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <32>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
+				qcom,mdss-dsi-on-command = [
+					/* CMD2_P0 */
+					15 01 00 00 00 00 02 FF 20
+					15 01 00 00 00 00 02 FB 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 05 40
+					15 01 00 00 00 00 02 06 19
+					15 01 00 00 00 00 02 07 1E
+					15 01 00 00 00 00 02 0B 73
+					15 01 00 00 00 00 02 0C 73
+					15 01 00 00 00 00 02 0E B0
+					15 01 00 00 00 00 02 0F AE
+					15 01 00 00 00 00 02 11 B8
+					15 01 00 00 00 00 02 13 00
+					15 01 00 00 00 00 02 58 80
+					15 01 00 00 00 00 02 59 01
+					15 01 00 00 00 00 02 5A 00
+					15 01 00 00 00 00 02 5B 01
+					15 01 00 00 00 00 02 5C 80
+					15 01 00 00 00 00 02 5D 81
+					15 01 00 00 00 00 02 5E 00
+					15 01 00 00 00 00 02 5F 01
+					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 68 03
+					/* CMD2_P4 */
+					15 01 00 00 00 00 02 FF 24
+					15 01 00 00 00 00 02 FB 01
+					15 01 00 00 00 00 02 00 1C
+					15 01 00 00 00 00 02 01 0B
+					15 01 00 00 00 00 02 02 0C
+					15 01 00 00 00 00 02 03 01
+					15 01 00 00 00 00 02 04 0F
+					15 01 00 00 00 00 02 05 10
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 89
+					15 01 00 00 00 00 02 09 8A
+					15 01 00 00 00 00 02 0A 13
+					15 01 00 00 00 00 02 0B 13
+					15 01 00 00 00 00 02 0C 15
+					15 01 00 00 00 00 02 0D 15
+					15 01 00 00 00 00 02 0E 17
+					15 01 00 00 00 00 02 0F 17
+					15 01 00 00 00 00 02 10 1C
+					15 01 00 00 00 00 02 11 0B
+					15 01 00 00 00 00 02 12 0C
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 0F
+					15 01 00 00 00 00 02 15 10
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 89
+					15 01 00 00 00 00 02 19 8A
+					15 01 00 00 00 00 02 1A 13
+					15 01 00 00 00 00 02 1B 13
+					15 01 00 00 00 00 02 1C 15
+					15 01 00 00 00 00 02 1D 15
+					15 01 00 00 00 00 02 1E 17
+					15 01 00 00 00 00 02 1F 17
+					/* STV */
+					15 01 00 00 00 00 02 20 40
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6D
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					/* Vend */
+					15 01 00 00 00 00 02 E0 00
+					15 01 00 00 00 00 02 DC 21
+					15 01 00 00 00 00 02 DD 22
+					15 01 00 00 00 00 02 DE 07
+					15 01 00 00 00 00 02 DF 07
+					15 01 00 00 00 00 02 E3 6D
+					15 01 00 00 00 00 02 E1 07
+					15 01 00 00 00 00 02 E2 07
+					/* UD */
+					15 01 00 00 00 00 02 29 D8
+					15 01 00 00 00 00 02 2A 2A
+					/* CLK */
+					15 01 00 00 00 00 02 4B 03
+					15 01 00 00 00 00 02 4C 11
+					15 01 00 00 00 00 02 4D 10
+					15 01 00 00 00 00 02 4E 01
+					15 01 00 00 00 00 02 4F 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 55 25
+					/* Reset XDONB */
+					15 01 00 00 00 00 02 5B 43
+					15 01 00 00 00 00 02 5C 00
+					15 01 00 00 00 00 02 5F 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					/* Resolution: 1440x2560 */
+					15 01 00 00 00 00 02 72 02
+					/* mux */
+					15 01 00 00 00 00 02 7A 80
+					15 01 00 00 00 00 02 7B 91
+					15 01 00 00 00 00 02 7C D8
+					15 01 00 00 00 00 02 7D 60
+					15 01 00 00 00 00 02 7F 15
+					15 01 00 00 00 00 02 75 15
+					/* ABOFF */
+					15 01 00 00 00 00 02 B3 C0
+					15 01 00 00 00 00 02 B4 00
+					15 01 00 00 00 00 02 B5 00
+					/* Source EQ */
+					15 01 00 00 00 00 02 78 00
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 80 00
+					15 01 00 00 00 00 02 83 00
+					/* FP BP */
+					15 01 00 00 00 00 02 93 0A
+					15 01 00 00 00 00 02 94 0A
+					/* Inversion Type */
+					15 01 00 00 00 00 02 8A 00
+					15 01 00 00 00 00 02 9B FF
+					/* IMGSWAP = 1 when PortSwap = 1 */
+					15 01 00 00 00 00 02 9D B0
+					15 01 00 00 00 00 02 9F 63
+					15 01 00 00 00 00 02 98 10
+					/* FRM */
+					15 01 00 00 00 00 02 EC 00
+					/* CMD1 */
+					15 01 00 00 00 00 02 FF 10
+					/* VBP+VSA = 0x0A, VFP = 0x0A */
+					39 01 00 00 00 00 04 3B 03 0A 0A
+					/* FTE on */
+					15 01 00 00 00 00 02 35 00
+					/* EN_BK =1(auto black) */
+					15 01 00 00 00 00 02 E5 01
+					/* CMD mode(10) VDO mode(03) */
+					15 01 00 00 00 00 02 BB 10
+					/* Non Reload MTP */
+					15 01 00 00 00 00 02 FB 01
+					/* SlpOut + DispOn */
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00
+					];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
new file mode 100644
index 0000000..d58defb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35597-truly-dualmipi-wqxga-video.dtsi
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt35597_truly_video: qcom,mdss_dsi_nt35597_wqxga_video_truly {
+		qcom,mdss-dsi-panel-name =
+			"Dual nt35597 video mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+		qcom,mdss-pan-physical-width-dimension = <74>;
+		qcom,mdss-pan-physical-height-dimension = <131>;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-underflow-color = <0x3ff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <32>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					/* CMD2_P0 */
+					15 01 00 00 00 00 02 FF 20
+					15 01 00 00 00 00 02 FB 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 05 40
+					15 01 00 00 00 00 02 06 19
+					15 01 00 00 00 00 02 07 1E
+					15 01 00 00 00 00 02 0B 73
+					15 01 00 00 00 00 02 0C 73
+					15 01 00 00 00 00 02 0E B0
+					15 01 00 00 00 00 02 0F AE
+					15 01 00 00 00 00 02 11 B8
+					15 01 00 00 00 00 02 13 00
+					15 01 00 00 00 00 02 58 80
+					15 01 00 00 00 00 02 59 01
+					15 01 00 00 00 00 02 5A 00
+					15 01 00 00 00 00 02 5B 01
+					15 01 00 00 00 00 02 5C 80
+					15 01 00 00 00 00 02 5D 81
+					15 01 00 00 00 00 02 5E 00
+					15 01 00 00 00 00 02 5F 01
+					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 68 03
+					/* CMD2_P4 */
+					15 01 00 00 00 00 02 FF 24
+					15 01 00 00 00 00 02 FB 01
+					15 01 00 00 00 00 02 00 1C
+					15 01 00 00 00 00 02 01 0B
+					15 01 00 00 00 00 02 02 0C
+					15 01 00 00 00 00 02 03 01
+					15 01 00 00 00 00 02 04 0F
+					15 01 00 00 00 00 02 05 10
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 89
+					15 01 00 00 00 00 02 09 8A
+					15 01 00 00 00 00 02 0A 13
+					15 01 00 00 00 00 02 0B 13
+					15 01 00 00 00 00 02 0C 15
+					15 01 00 00 00 00 02 0D 15
+					15 01 00 00 00 00 02 0E 17
+					15 01 00 00 00 00 02 0F 17
+					15 01 00 00 00 00 02 10 1C
+					15 01 00 00 00 00 02 11 0B
+					15 01 00 00 00 00 02 12 0C
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 0F
+					15 01 00 00 00 00 02 15 10
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 89
+					15 01 00 00 00 00 02 19 8A
+					15 01 00 00 00 00 02 1A 13
+					15 01 00 00 00 00 02 1B 13
+					15 01 00 00 00 00 02 1C 15
+					15 01 00 00 00 00 02 1D 15
+					15 01 00 00 00 00 02 1E 17
+					15 01 00 00 00 00 02 1F 17
+					/* STV */
+					15 01 00 00 00 00 02 20 40
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6D
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					/* Vend */
+					15 01 00 00 00 00 02 E0 00
+					15 01 00 00 00 00 02 DC 21
+					15 01 00 00 00 00 02 DD 22
+					15 01 00 00 00 00 02 DE 07
+					15 01 00 00 00 00 02 DF 07
+					15 01 00 00 00 00 02 E3 6D
+					15 01 00 00 00 00 02 E1 07
+					15 01 00 00 00 00 02 E2 07
+					/* UD */
+					15 01 00 00 00 00 02 29 D8
+					15 01 00 00 00 00 02 2A 2A
+					/* CLK */
+					15 01 00 00 00 00 02 4B 03
+					15 01 00 00 00 00 02 4C 11
+					15 01 00 00 00 00 02 4D 10
+					15 01 00 00 00 00 02 4E 01
+					15 01 00 00 00 00 02 4F 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 55 25
+					/* Reset XDONB */
+					15 01 00 00 00 00 02 5B 43
+					15 01 00 00 00 00 02 5C 00
+					15 01 00 00 00 00 02 5F 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					/* Resolution: 1440x2560 */
+					15 01 00 00 00 00 02 72 02
+					/* mux */
+					15 01 00 00 00 00 02 7A 80
+					15 01 00 00 00 00 02 7B 91
+					15 01 00 00 00 00 02 7C D8
+					15 01 00 00 00 00 02 7D 60
+					15 01 00 00 00 00 02 7F 15
+					15 01 00 00 00 00 02 75 15
+					/* ABOFF */
+					15 01 00 00 00 00 02 B3 C0
+					15 01 00 00 00 00 02 B4 00
+					15 01 00 00 00 00 02 B5 00
+					/* Source EQ */
+					15 01 00 00 00 00 02 78 00
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 80 00
+					15 01 00 00 00 00 02 83 00
+					/* FP BP */
+					15 01 00 00 00 00 02 93 0A
+					15 01 00 00 00 00 02 94 0A
+					/* Inversion Type */
+					15 01 00 00 00 00 02 8A 00
+					15 01 00 00 00 00 02 9B FF
+					/* IMGSWAP = 1 when PortSwap = 1 */
+					15 01 00 00 00 00 02 9D B0
+					15 01 00 00 00 00 02 9F 63
+					15 01 00 00 00 00 02 98 10
+					/* FRM */
+					15 01 00 00 00 00 02 EC 00
+					/* CMD1 */
+					15 01 00 00 00 00 02 FF 10
+					/* VBP+VSA = 0x0A, VFP = 0x0A */
+					39 01 00 00 00 00 04 3B 03 0A 0A
+					/* FTE on */
+					15 01 00 00 00 00 02 35 00
+					/* EN_BK =1(auto black) */
+					15 01 00 00 00 00 02 E5 01
+					/* CMD mode(10) VDO mode(03) */
+					15 01 00 00 00 00 02 BB 03
+					/* Non Reload MTP */
+					15 01 00 00 00 00 02 FB 01
+					/* SlpOut + DispOn */
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00
+					];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
new file mode 100644
index 0000000..e58fb9d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_nt35695b_truly_fhd_cmd: qcom,mdss_dsi_nt35695b_truly_fhd_cmd {
+		qcom,mdss-dsi-panel-name =
+				"nt35695b truly fhd command mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-post-init-delay = <1>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <60>;
+				qcom,mdss-dsi-h-pulse-width = <12>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <2>;
+				qcom,mdss-dsi-v-front-porch = <12>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command =
+					[15 01 00 00 10 00 02 ff 20
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 03 55
+					15 01 00 00 00 00 02 05 50
+					15 01 00 00 00 00 02 06 a8
+					15 01 00 00 00 00 02 07 ad
+					15 01 00 00 00 00 02 08 0c
+					15 01 00 00 00 00 02 0b aa
+					15 01 00 00 00 00 02 0c aa
+					15 01 00 00 00 00 02 0e b0
+					15 01 00 00 00 00 02 0f b3
+					15 01 00 00 00 00 02 11 28
+					15 01 00 00 00 00 02 12 10
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 4a
+					15 01 00 00 00 00 02 15 12
+					15 01 00 00 00 00 02 16 12
+					15 01 00 00 00 00 02 30 01
+					15 01 00 00 00 00 02 72 11
+					15 01 00 00 00 00 02 58 82
+					15 01 00 00 00 00 02 59 00
+					15 01 00 00 00 00 02 5a 02
+					15 01 00 00 00 00 02 5b 00
+					15 01 00 00 00 00 02 5c 82
+					15 01 00 00 00 00 02 5d 80
+					15 01 00 00 00 00 02 5e 02
+					15 01 00 00 00 00 02 5f 00
+					15 01 00 00 00 00 02 ff 24
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 0b
+					15 01 00 00 00 00 02 02 0c
+					15 01 00 00 00 00 02 03 89
+					15 01 00 00 00 00 02 04 8a
+					15 01 00 00 00 00 02 05 0f
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 1c
+					15 01 00 00 00 00 02 09 00
+					15 01 00 00 00 00 02 0a 00
+					15 01 00 00 00 00 02 0b 00
+					15 01 00 00 00 00 02 0c 00
+					15 01 00 00 00 00 02 0d 13
+					15 01 00 00 00 00 02 0e 15
+					15 01 00 00 00 00 02 0f 17
+					15 01 00 00 00 00 02 10 01
+					15 01 00 00 00 00 02 11 0b
+					15 01 00 00 00 00 02 12 0c
+					15 01 00 00 00 00 02 13 89
+					15 01 00 00 00 00 02 14 8a
+					15 01 00 00 00 00 02 15 0f
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 1c
+					15 01 00 00 00 00 02 19 00
+					15 01 00 00 00 00 02 1a 00
+					15 01 00 00 00 00 02 1b 00
+					15 01 00 00 00 00 02 1c 00
+					15 01 00 00 00 00 02 1d 13
+					15 01 00 00 00 00 02 1e 15
+					15 01 00 00 00 00 02 1f 17
+					15 01 00 00 00 00 02 20 00
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6d
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					15 01 00 00 00 00 02 29 d8
+					15 01 00 00 00 00 02 2a 2a
+					15 01 00 00 00 00 02 4b 03
+					15 01 00 00 00 00 02 4c 11
+					15 01 00 00 00 00 02 4d 10
+					15 01 00 00 00 00 02 4e 01
+					15 01 00 00 00 00 02 4f 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 55 25
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 5b 43
+					15 01 00 00 00 00 02 5c 00
+					15 01 00 00 00 00 02 5f 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					15 01 00 00 00 00 02 7a 80
+					15 01 00 00 00 00 02 7b 91
+					15 01 00 00 00 00 02 7c d8
+					15 01 00 00 00 00 02 7d 60
+					15 01 00 00 00 00 02 93 06
+					15 01 00 00 00 00 02 94 06
+					15 01 00 00 00 00 02 8a 00
+					15 01 00 00 00 00 02 9b 0f
+					15 01 00 00 00 00 02 b3 c0
+					15 01 00 00 00 00 02 b4 00
+					15 01 00 00 00 00 02 b5 00
+					15 01 00 00 00 00 02 b6 21
+					15 01 00 00 00 00 02 b7 22
+					15 01 00 00 00 00 02 b8 07
+					15 01 00 00 00 00 02 b9 07
+					15 01 00 00 00 00 02 ba 22
+					15 01 00 00 00 00 02 bd 20
+					15 01 00 00 00 00 02 be 07
+					15 01 00 00 00 00 02 bf 07
+					15 01 00 00 00 00 02 c1 6d
+					15 01 00 00 00 00 02 c4 24
+					15 01 00 00 00 00 02 e3 00
+					15 01 00 00 00 00 02 ec 00
+					15 01 00 00 00 00 02 ff 10
+					15 01 00 00 00 00 02 bb 10
+					15 01 00 00 00 00 02 35 00
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 14 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-video.dtsi
new file mode 100644
index 0000000..c55f5d66
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-video.dtsi
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_nt35695b_truly_fhd_video: qcom,mdss_dsi_nt35695b_truly_fhd_video {
+		qcom,mdss-dsi-panel-name =
+				"nt35695b truly fhd video mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-post-init-delay = <1>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <60>;
+				qcom,mdss-dsi-h-pulse-width = <12>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-v-back-porch = <2>;
+				qcom,mdss-dsi-v-front-porch = <12>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-on-command =
+					[15 01 00 00 10 00 02 ff 20
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 03 55
+					15 01 00 00 00 00 02 05 50
+					15 01 00 00 00 00 02 06 a8
+					15 01 00 00 00 00 02 07 ad
+					15 01 00 00 00 00 02 08 0c
+					15 01 00 00 00 00 02 0b aa
+					15 01 00 00 00 00 02 0c aa
+					15 01 00 00 00 00 02 0e b0
+					15 01 00 00 00 00 02 0f b3
+					15 01 00 00 00 00 02 11 28
+					15 01 00 00 00 00 02 12 10
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 4a
+					15 01 00 00 00 00 02 15 12
+					15 01 00 00 00 00 02 16 12
+					15 01 00 00 00 00 02 30 01
+					15 01 00 00 00 00 02 72 11
+					15 01 00 00 00 00 02 58 82
+					15 01 00 00 00 00 02 59 00
+					15 01 00 00 00 00 02 5a 02
+					15 01 00 00 00 00 02 5b 00
+					15 01 00 00 00 00 02 5c 82
+					15 01 00 00 00 00 02 5d 80
+					15 01 00 00 00 00 02 5e 02
+					15 01 00 00 00 00 02 5f 00
+					15 01 00 00 00 00 02 ff 24
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 0b
+					15 01 00 00 00 00 02 02 0c
+					15 01 00 00 00 00 02 03 89
+					15 01 00 00 00 00 02 04 8a
+					15 01 00 00 00 00 02 05 0f
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 1c
+					15 01 00 00 00 00 02 09 00
+					15 01 00 00 00 00 02 0a 00
+					15 01 00 00 00 00 02 0b 00
+					15 01 00 00 00 00 02 0c 00
+					15 01 00 00 00 00 02 0d 13
+					15 01 00 00 00 00 02 0e 15
+					15 01 00 00 00 00 02 0f 17
+					15 01 00 00 00 00 02 10 01
+					15 01 00 00 00 00 02 11 0b
+					15 01 00 00 00 00 02 12 0c
+					15 01 00 00 00 00 02 13 89
+					15 01 00 00 00 00 02 14 8a
+					15 01 00 00 00 00 02 15 0f
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 1c
+					15 01 00 00 00 00 02 19 00
+					15 01 00 00 00 00 02 1a 00
+					15 01 00 00 00 00 02 1b 00
+					15 01 00 00 00 00 02 1c 00
+					15 01 00 00 00 00 02 1d 13
+					15 01 00 00 00 00 02 1e 15
+					15 01 00 00 00 00 02 1f 17
+					15 01 00 00 00 00 02 20 00
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6d
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					15 01 00 00 00 00 02 29 d8
+					15 01 00 00 00 00 02 2a 2a
+					15 01 00 00 00 00 02 4b 03
+					15 01 00 00 00 00 02 4c 11
+					15 01 00 00 00 00 02 4d 10
+					15 01 00 00 00 00 02 4e 01
+					15 01 00 00 00 00 02 4f 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 55 25
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 5b 43
+					15 01 00 00 00 00 02 5c 00
+					15 01 00 00 00 00 02 5f 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					15 01 00 00 00 00 02 7a 80
+					15 01 00 00 00 00 02 7b 91
+					15 01 00 00 00 00 02 7c d8
+					15 01 00 00 00 00 02 7d 60
+					15 01 00 00 00 00 02 93 06
+					15 01 00 00 00 00 02 94 06
+					15 01 00 00 00 00 02 8a 00
+					15 01 00 00 00 00 02 9b 0f
+					15 01 00 00 00 00 02 b3 c0
+					15 01 00 00 00 00 02 b4 00
+					15 01 00 00 00 00 02 b5 00
+					15 01 00 00 00 00 02 b6 21
+					15 01 00 00 00 00 02 b7 22
+					15 01 00 00 00 00 02 b8 07
+					15 01 00 00 00 00 02 b9 07
+					15 01 00 00 00 00 02 ba 22
+					15 01 00 00 00 00 02 bd 20
+					15 01 00 00 00 00 02 be 07
+					15 01 00 00 00 00 02 bf 07
+					15 01 00 00 00 00 02 c1 6d
+					15 01 00 00 00 00 02 c4 24
+					15 01 00 00 00 00 02 e3 00
+					15 01 00 00 00 00 02 ec 00
+					15 01 00 00 00 00 02 ff 10
+					15 01 00 00 00 00 02 bb 03
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 14 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 0000000..75cbccb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-nt36850-truly-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_nt36850_truly_cmd: qcom,mdss_dsi_nt36850_truly_wqhd_cmd {
+		qcom,mdss-dsi-panel-name =
+			"Dual nt36850 cmd mode dsi truly panel without DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 50>;
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <140>;
+				qcom,mdss-dsi-h-pulse-width = <20>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <20>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <4>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-on-command = [
+					15 01 00 00 00 00 02 ff 10
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 36 00
+					15 01 00 00 00 00 02 35 00
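+					/* 44 03 e8: set_tear_scanline 1000 */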
+					39 01 00 00 00 00 03 44 03 e8
+					15 01 00 00 00 00 02 51 ff
+					15 01 00 00 00 00 02 53 2c
+					15 01 00 00 00 00 02 55 01
+					05 01 00 00 0a 00 02 20 00
+					15 01 00 00 00 00 02 bb 10
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00
+				];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00
+				];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
new file mode 100644
index 0000000..9305f29
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-s6e3ha3-amoled-dualmipi-wqhd-cmd.dtsi
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_s6e3ha3_amoled_cmd: qcom,mdss_dsi_s6e3ha3_amoled_wqhd_cmd {
+		qcom,mdss-dsi-panel-name =
+			"Dual s6e3ha3 amoled cmd mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-framerate = <60>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-panel-width = <720>;
+		qcom,mdss-dsi-panel-height = <2560>;
+		qcom,mdss-dsi-h-front-porch = <100>;
+		qcom,mdss-dsi-h-back-porch = <100>;
+		qcom,mdss-dsi-h-pulse-width = <40>;
+		qcom,mdss-dsi-h-sync-skew = <0>;
+		qcom,mdss-dsi-v-back-porch = <31>;
+		qcom,mdss-dsi-v-front-porch = <30>;
+		qcom,mdss-dsi-v-pulse-width = <8>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
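+		/* 2a/2b: CASET 0-0x59f (1440 cols), PASET 0-0x9ff (2560 rows) */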
+		qcom,mdss-dsi-on-command = [05 01 00 00 05 00 02 11 00
+					39 01 00 00 00 00 05 2a 00 00 05 9f
+					39 01 00 00 00 00 05 2b 00 00 09 ff
+					39 01 00 00 00 00 03 f0 5a 5a
+					39 01 00 00 00 00 02 b0 10
+					39 01 00 00 00 00 02 b5 a0
+					39 01 00 00 00 00 02 c4 03
+					39 01 00 00 00 00 0a
+						f6 42 57 37 00 aa cc d0 00 00
+					39 01 00 00 00 00 02 f9 03
+					39 01 00 00 00 00 14
+						c2 00 00 d8 d8 00 80 2b 05 08
+						0e 07 0b 05 0d 0a 15 13 20 1e
+					39 01 00 00 78 00 03 f0 a5 a5
+					39 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 02 53 20
+					39 01 00 00 00 00 02 51 60
+					05 01 00 00 05 00 02 29 00];
+		qcom,mdss-dsi-off-command = [05 01 00 00 3c 00 02 28 00
+					05 01 00 00 b4 00 02 10 00];
+		qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+		qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
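+		/*
+		 * Doze (low-power) entry/exit sequences; the f0/f1/fc
+		 * "5a 5a" writes are assumed to be vendor command
+		 * unlock keys.
+		 */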
+		qcom,mdss-dsi-lp-mode-on = [39 00 00 00 05 00 03 f0 5a 5a
+					39 00 00 00 05 00 03 f1 5a 5a
+					39 00 00 00 05 00 03 fc 5a 5a
+					39 00 00 00 05 00 02 b0 17
+					39 00 00 00 05 00 02 cb 10
+					39 00 00 00 05 00 02 b0 2d
+					39 00 00 00 05 00 02 cb cd
+					39 00 00 00 05 00 02 b0 0e
+					39 00 00 00 05 00 02 cb 02
+					39 00 00 00 05 00 02 b0 0f
+					39 00 00 00 05 00 02 cb 09
+					39 00 00 00 05 00 02 b0 02
+					39 00 00 00 05 00 02 f2 c9
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f2 c0
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f4 aa
+					39 00 00 00 05 00 02 b0 08
+					39 00 00 00 05 00 02 b1 30
+					39 00 00 00 05 00 02 b0 09
+					39 00 00 00 05 00 02 b1 0a
+					39 00 00 00 05 00 02 b0 0d
+					39 00 00 00 05 00 02 b1 10
+					39 00 00 00 05 00 02 b0 00
+					39 00 00 00 05 00 02 f7 03
+					39 00 00 00 05 00 02 fe 30
+					39 01 00 00 05 00 02 fe b0];
+		qcom,mdss-dsi-lp-mode-off = [39 00 00 00 05 00 03 f0 5a 5a
+					39 00 00 00 05 00 03 f1 5a 5a
+					39 00 00 00 05 00 03 fc 5a 5a
+					39 00 00 00 05 00 02 b0 2d
+					39 00 00 00 05 00 02 cb 4d
+					39 00 00 00 05 00 02 b0 17
+					39 00 00 00 05 00 02 cb 04
+					39 00 00 00 05 00 02 b0 0e
+					39 00 00 00 05 00 02 cb 06
+					39 00 00 00 05 00 02 b0 0f
+					39 00 00 00 05 00 02 cb 05
+					39 00 00 00 05 00 02 b0 02
+					39 00 00 00 05 00 02 f2 b8
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f2 80
+					39 00 00 00 05 00 02 b0 03
+					39 00 00 00 05 00 02 f4 8a
+					39 00 00 00 05 00 02 b0 08
+					39 00 00 00 05 00 02 b1 10
+					39 00 00 00 05 00 02 b0 09
+					39 00 00 00 05 00 02 b1 0a
+					39 00 00 00 05 00 02 b0 0d
+					39 00 00 00 05 00 02 b1 80
+					39 00 00 00 05 00 02 b0 00
+					39 00 00 00 05 00 02 f7 03
+					39 00 00 00 05 00 02 fe 30
+					39 01 00 00 05 00 02 fe b0];
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,dcs-cmd-by-left;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <255>;
+		qcom,mdss-pan-physical-width-dimension = <68>;
+		qcom,mdss-pan-physical-height-dimension = <122>;
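+		/* Reset GPIO sequence as <level sleep-ms> tuples. */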
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
new file mode 100644
index 0000000..9263caf
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-1080p-cmd.dtsi
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sharp_1080_cmd: qcom,mdss_dsi_sharp_1080p_cmd {
+		qcom,mdss-dsi-panel-name = "sharp 1080p cmd mode dsi panel";
+		qcom,mdss-dsi-panel-controller = <&mdss_dsi0>;
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-panel-destination = "display_1";
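+		/* DSI bit clock in Hz (850 MHz). */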
+		qcom,mdss-dsi-panel-clockrate = <850000000>;
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-pan-physical-width-dimension = <64>;
+		qcom,mdss-pan-physical-height-dimension = <117>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_pwm";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
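+				/*
+				 * Porches are zero: this command-mode panel
+				 * is paced by the TE signal, so the DSI video
+				 * timing fields are effectively unused here.
+				 */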
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <0>;
+				qcom,mdss-dsi-h-back-porch = <0>;
+				qcom,mdss-dsi-h-pulse-width = <0>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <0>;
+				qcom,mdss-dsi-v-front-porch = <0>;
+				qcom,mdss-dsi-v-pulse-width = <0>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					15 01 00 00 00 00 02 bb 10
+					15 01 00 00 00 00 02 b0 03
+					05 01 00 00 78 00 01 11
+					15 01 00 00 00 00 02 51 ff
+					15 01 00 00 00 00 02 53 24
+					15 01 00 00 00 00 02 ff 23
+					15 01 00 00 00 00 02 08 05
+					15 01 00 00 00 00 02 46 90
+					15 01 00 00 00 00 02 ff 10
+					15 01 00 00 00 00 02 ff f0
+					15 01 00 00 00 00 02 92 01
+					15 01 00 00 00 00 02 ff 10
+					/* enable TE generation */
+					15 01 00 00 00 00 02 35 00
+					05 01 00 00 28 00 01 29];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 10 00 01 28
+					05 01 00 00 40 00 01 10];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
new file mode 100644
index 0000000..e7903aa
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-cmd.dtsi
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sharp_4k_dsc_cmd: qcom,mdss_dsi_sharp_4k_dsc_cmd {
+		qcom,mdss-dsi-panel-name = "Sharp 4k cmd mode dsc dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+		qcom,mdss-pan-physical-width-dimension = <71>;
+		qcom,mdss-pan-physical-height-dimension = <129>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,dcs-cmd-by-left;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,adjust-timer-wakeup-ms = <1>;
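+		/*
+		 * HDR metadata: primaries are x/y pairs (W, R, G, B) in
+		 * 0.00002 units per CTA-861.3; peak brightness and blackness
+		 * level are in 0.0001-nit units (420 nits peak, 0.323 nit).
+		 */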
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <3840>;
+				qcom,mdss-dsi-h-front-porch = <30>;
+				qcom,mdss-dsi-h-back-porch = <100>;
+				qcom,mdss-dsi-h-pulse-width = <4>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-jitter = <0x8 0xa>;
+
+				qcom,mdss-dsi-on-command = [
+					39 01 00 00 00 00 11 91 09 20 00 20 02
+					00 03 1c 04 21 00
+					0f 03 19 01 97
+					39 01 00 00 00 00 03 92 10 f0
+					15 01 00 00 00 00 02 90 03
+					15 01 00 00 00 00 02 03 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 04
+					15 01 00 00 00 00 02 c0 03
+					39 01 00 00 00 00 06 f0 55 aa 52 08 07
+					15 01 00 00 00 00 02 ef 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 00
+					15 01 00 00 00 00 02 b4 01
+					15 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 06 f0 55 aa 52 08 01
+					39 01 00 00 00 00 05 ff aa 55 a5 80
+					15 01 00 00 00 00 02 6f 01
+					15 01 00 00 00 00 02 f3 10
+					39 01 00 00 00 00 05 ff aa 55 a5 00
+					/* sleep out + delay 120ms */
+					05 01 00 00 78 00 01 11
+					/* display on + delay 120ms */
+					05 01 00 00 78 00 01 29
+					];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 78 00 02 28 00
+					 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
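+				/*
+				 * DSC: 1080x32 slices, one per packet, 8 bpc
+				 * coded at 8 bpp, i.e. 3:1 compression of the
+				 * 24 bpp stream.
+				 */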
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <32>;
+				qcom,mdss-dsc-slice-width = <1080>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
new file mode 100644
index 0000000..5e186fb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dsc-4k-video.dtsi
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sharp_4k_dsc_video: qcom,mdss_dsi_sharp_4k_dsc_video {
+		qcom,mdss-dsi-panel-name = "Sharp 4k video mode dsc dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
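+		/* Burst mode: pixels are sent at full link rate and the
+		 * link drops to LP11 for the rest of each line.
+		 */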
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 100>, <0 100>, <1 100>;
+		qcom,mdss-pan-physical-width-dimension = <71>;
+		qcom,mdss-pan-physical-height-dimension = <129>;
+		qcom,mdss-dsi-tx-eot-append;
+
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <3840>;
+				qcom,mdss-dsi-h-front-porch = <30>;
+				qcom,mdss-dsi-h-back-porch = <100>;
+				qcom,mdss-dsi-h-pulse-width = <4>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+
+				qcom,mdss-dsi-on-command = [
+					39 01 00 00 00 00 11 91 09 20 00 20 02
+					00 03 1c 04 21 00
+					0f 03 19 01 97
+					39 01 00 00 00 00 03 92 10 f0
+					15 01 00 00 00 00 02 90 03
+					15 01 00 00 00 00 02 03 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 04
+					15 01 00 00 00 00 02 c0 03
+					39 01 00 00 00 00 06 f0 55 aa 52 08 07
+					15 01 00 00 00 00 02 ef 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 00
+					15 01 00 00 00 00 02 b4 10
+					15 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 06 f0 55 aa 52 08 01
+					39 01 00 00 00 00 05 ff aa 55 a5 80
+					15 01 00 00 00 00 02 6f 01
+					15 01 00 00 00 00 02 f3 10
+					39 01 00 00 00 00 05 ff aa 55 a5 00
+					/* sleep out + delay 120ms */
+					05 01 00 00 78 00 01 11
+					/* display on + delay 120ms */
+					05 01 00 00 78 00 01 29
+					];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 78 00 02 28 00
+					 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <32>;
+				qcom,mdss-dsc-slice-width = <1080>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
new file mode 100644
index 0000000..376cfb8
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sharp-dualmipi-1080p-120hz.dtsi
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_sharp_1080_120hz_cmd: qcom,mdss_dual_sharp_1080p_120hz_cmd {
+		qcom,mdss-dsi-panel-name =
+			"sharp 1080p 120hz dual dsi cmd mode panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 1>, <1 10>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
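+		/* Dual-DSI: broadcast commands to both controllers; the
+		 * trigger controller paces the synchronized waits.
+		 */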
+		qcom,cmd-sync-wait-broadcast;
+		qcom,cmd-sync-wait-trigger;
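+		/* Tear-check rate in 0.01 Hz units (120 Hz). */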
+		qcom,mdss-tear-check-frame-rate = <12000>;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
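+				/* Width is per controller: 2 x 540 = 1080
+				 * across the dual-DSI link.
+				 */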
+				qcom,mdss-dsi-panel-width = <540>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <28>;
+				qcom,mdss-dsi-h-back-porch = <4>;
+				qcom,mdss-dsi-h-pulse-width = <4>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <12>;
+				qcom,mdss-dsi-v-front-porch = <12>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <120>;
+				qcom,mdss-dsi-on-command =
+					[15 01 00 00 00 00 02 ff 10
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 ba 07
+					15 01 00 00 00 00 02 c0 00
+					15 01 00 00 00 00 02 bb 10
+					15 01 00 00 00 00 02 d9 00
+					15 01 00 00 00 00 02 ef 70
+					15 01 00 00 00 00 02 f7 80
+					39 01 00 00 00 00 06 3b 03 0e 0c 08 1c
+					15 01 00 00 00 00 02 e9 0e
+					15 01 00 00 00 00 02 ea 0c
+					15 01 00 00 00 00 02 35 00
+					15 01 00 00 00 00 02 c0 00
+					15 01 00 00 00 00 02 ff 20
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 59 6a
+					15 01 00 00 00 00 02 0b 1b
+					15 01 00 00 00 00 02 61 f7
+					15 01 00 00 00 00 02 62 6c
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 04 c8
+					15 01 00 00 00 00 02 05 1a
+					15 01 00 00 00 00 02 0d 93
+					15 01 00 00 00 00 02 0e 93
+					15 01 00 00 00 00 02 0f 7e
+					15 01 00 00 00 00 02 06 69
+					15 01 00 00 00 00 02 07 bc
+					15 01 00 00 00 00 02 10 03
+					15 01 00 00 00 00 02 11 64
+					15 01 00 00 00 00 02 12 5a
+					15 01 00 00 00 00 02 13 40
+					15 01 00 00 00 00 02 14 40
+					15 01 00 00 00 00 02 15 00
+					15 01 00 00 00 00 02 33 13
+					15 01 00 00 00 00 02 5a 40
+					15 01 00 00 00 00 02 5b 40
+					15 01 00 00 00 00 02 5e 80
+					15 01 00 00 00 00 02 ff 24
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 80
+					15 01 00 00 00 00 02 14 80
+					15 01 00 00 00 00 02 01 80
+					15 01 00 00 00 00 02 15 80
+					15 01 00 00 00 00 02 02 80
+					15 01 00 00 00 00 02 16 80
+					15 01 00 00 00 00 02 03 0a
+					15 01 00 00 00 00 02 17 0c
+					15 01 00 00 00 00 02 04 06
+					15 01 00 00 00 00 02 18 08
+					15 01 00 00 00 00 02 05 80
+					15 01 00 00 00 00 02 19 80
+					15 01 00 00 00 00 02 06 80
+					15 01 00 00 00 00 02 1a 80
+					15 01 00 00 00 00 02 07 80
+					15 01 00 00 00 00 02 1b 80
+					15 01 00 00 00 00 02 08 80
+					15 01 00 00 00 00 02 1c 80
+					15 01 00 00 00 00 02 09 80
+					15 01 00 00 00 00 02 1d 80
+					15 01 00 00 00 00 02 0a 80
+					15 01 00 00 00 00 02 1e 80
+					15 01 00 00 00 00 02 0b 1a
+					15 01 00 00 00 00 02 1f 1b
+					15 01 00 00 00 00 02 0c 16
+					15 01 00 00 00 00 02 20 17
+					15 01 00 00 00 00 02 0d 1c
+					15 01 00 00 00 00 02 21 1d
+					15 01 00 00 00 00 02 0e 18
+					15 01 00 00 00 00 02 22 19
+					15 01 00 00 00 00 02 0f 0e
+					15 01 00 00 00 00 02 23 10
+					15 01 00 00 00 00 02 10 80
+					15 01 00 00 00 00 02 24 80
+					15 01 00 00 00 00 02 11 80
+					15 01 00 00 00 00 02 25 80
+					15 01 00 00 00 00 02 12 80
+					15 01 00 00 00 00 02 26 80
+					15 01 00 00 00 00 02 13 80
+					15 01 00 00 00 00 02 27 80
+					15 01 00 00 00 00 02 74 ff
+					15 01 00 00 00 00 02 75 ff
+					15 01 00 00 00 00 02 8d 00
+					15 01 00 00 00 00 02 8e 00
+					15 01 00 00 00 00 02 8f 9c
+					15 01 00 00 00 00 02 90 0c
+					15 01 00 00 00 00 02 91 0e
+					15 01 00 00 00 00 02 d6 00
+					15 01 00 00 00 00 02 d7 20
+					15 01 00 00 00 00 02 d8 00
+					15 01 00 00 00 00 02 d9 88
+					15 01 00 00 00 00 02 e5 05
+					15 01 00 00 00 00 02 e6 10
+					15 01 00 00 00 00 02 54 06
+					15 01 00 00 00 00 02 55 05
+					15 01 00 00 00 00 02 56 04
+					15 01 00 00 00 00 02 58 03
+					15 01 00 00 00 00 02 59 33
+					15 01 00 00 00 00 02 5a 33
+					15 01 00 00 00 00 02 5b 01
+					15 01 00 00 00 00 02 5c 00
+					15 01 00 00 00 00 02 5d 01
+					15 01 00 00 00 00 02 5e 0a
+					15 01 00 00 00 00 02 5f 0a
+					15 01 00 00 00 00 02 60 0a
+					15 01 00 00 00 00 02 61 0a
+					15 01 00 00 00 00 02 62 10
+					15 01 00 00 00 00 02 63 01
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 65 00
+					15 01 00 00 00 00 02 ef 00
+					15 01 00 00 00 00 02 f0 00
+					15 01 00 00 00 00 02 6d 20
+					15 01 00 00 00 00 02 66 44
+					15 01 00 00 00 00 02 68 01
+					15 01 00 00 00 00 02 69 00
+					15 01 00 00 00 00 02 67 11
+					15 01 00 00 00 00 02 6a 06
+					15 01 00 00 00 00 02 6b 31
+					15 01 00 00 00 00 02 6c 90
+					15 01 00 00 00 00 02 ab c3
+					15 01 00 00 00 00 02 b1 49
+					15 01 00 00 00 00 02 aa 80
+					15 01 00 00 00 00 02 b0 90
+					15 01 00 00 00 00 02 b2 a4
+					15 01 00 00 00 00 02 b3 00
+					15 01 00 00 00 00 02 b4 23
+					15 01 00 00 00 00 02 b5 00
+					15 01 00 00 00 00 02 b6 00
+					15 01 00 00 00 00 02 b7 00
+					15 01 00 00 00 00 02 b8 00
+					15 01 00 00 00 00 02 b9 00
+					15 01 00 00 00 00 02 ba 00
+					15 01 00 00 00 00 02 bb 00
+					15 01 00 00 00 00 02 bc 00
+					15 01 00 00 00 00 02 bd 00
+					15 01 00 00 00 00 02 be 00
+					15 01 00 00 00 00 02 bf 00
+					15 01 00 00 00 00 02 c0 00
+					15 01 00 00 00 00 02 c7 40
+					15 01 00 00 00 00 02 c9 00
+					15 01 00 00 00 00 02 c1 2a
+					15 01 00 00 00 00 02 c2 2a
+					15 01 00 00 00 00 02 c3 00
+					15 01 00 00 00 00 02 c4 00
+					15 01 00 00 00 00 02 c5 00
+					15 01 00 00 00 00 02 c6 00
+					15 01 00 00 00 00 02 c8 ab
+					15 01 00 00 00 00 02 ca 00
+					15 01 00 00 00 00 02 cb 00
+					15 01 00 00 00 00 02 cc 20
+					15 01 00 00 00 00 02 cd 40
+					15 01 00 00 00 00 02 ce a8
+					15 01 00 00 00 00 02 cf a8
+					15 01 00 00 00 00 02 d0 00
+					15 01 00 00 00 00 02 d1 00
+					15 01 00 00 00 00 02 d2 00
+					15 01 00 00 00 00 02 d3 00
+					15 01 00 00 00 00 02 af 01
+					15 01 00 00 00 00 02 a4 1e
+					15 01 00 00 00 00 02 95 41
+					15 01 00 00 00 00 02 96 03
+					15 01 00 00 00 00 02 98 00
+					15 01 00 00 00 00 02 9a 9a
+					15 01 00 00 00 00 02 9b 03
+					15 01 00 00 00 00 02 9d 80
+					15 01 00 00 00 00 02 ff 26
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 fa d0
+					15 01 00 00 00 00 02 6b 80
+					15 01 00 00 00 00 02 6c 5c
+					15 01 00 00 00 00 02 6d 0c
+					15 01 00 00 00 00 02 6e 0e
+					15 01 00 00 00 00 02 58 01
+					15 01 00 00 00 00 02 59 15
+					15 01 00 00 00 00 02 5a 01
+					15 01 00 00 00 00 02 5b 00
+					15 01 00 00 00 00 02 5c 01
+					15 01 00 00 00 00 02 5d 2b
+					15 01 00 00 00 00 02 74 00
+					15 01 00 00 00 00 02 75 ba
+					15 01 00 00 00 00 02 81 0a
+					15 01 00 00 00 00 02 4e 81
+					15 01 00 00 00 00 02 4f 83
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 53 4d
+					15 01 00 00 00 00 02 54 03
+					15 01 00 00 00 00 02 ff e0
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 b2 81
+					15 01 00 00 00 00 02 62 28
+					15 01 00 00 00 00 02 a2 09
+					15 01 00 00 00 00 02 b3 01
+					15 01 00 00 00 00 02 ed 00
+					15 01 00 00 00 00 02 ff 10
+					05 01 00 00 78 00 01 11
+					15 01 00 00 00 00 02 ff 20
+					15 01 00 00 00 00 02 75 00
+					15 01 00 00 00 00 02 76 71
+					15 01 00 00 00 00 02 77 00
+					15 01 00 00 00 00 02 78 84
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 7a a5
+					15 01 00 00 00 00 02 7b 00
+					15 01 00 00 00 00 02 7c bb
+					15 01 00 00 00 00 02 7d 00
+					15 01 00 00 00 00 02 7e ce
+					15 01 00 00 00 00 02 7f 00
+					15 01 00 00 00 00 02 80 e0
+					15 01 00 00 00 00 02 81 00
+					15 01 00 00 00 00 02 82 ef
+					15 01 00 00 00 00 02 83 00
+					15 01 00 00 00 00 02 84 ff
+					15 01 00 00 00 00 02 85 01
+					15 01 00 00 00 00 02 86 0b
+					15 01 00 00 00 00 02 87 01
+					15 01 00 00 00 00 02 88 38
+					15 01 00 00 00 00 02 89 01
+					15 01 00 00 00 00 02 8a 5b
+					15 01 00 00 00 00 02 8b 01
+					15 01 00 00 00 00 02 8c 95
+					15 01 00 00 00 00 02 8d 01
+					15 01 00 00 00 00 02 8e c4
+					15 01 00 00 00 00 02 8f 02
+					15 01 00 00 00 00 02 90 0d
+					15 01 00 00 00 00 02 91 02
+					15 01 00 00 00 00 02 92 4a
+					15 01 00 00 00 00 02 93 02
+					15 01 00 00 00 00 02 94 4c
+					15 01 00 00 00 00 02 95 02
+					15 01 00 00 00 00 02 96 85
+					15 01 00 00 00 00 02 97 02
+					15 01 00 00 00 00 02 98 c3
+					15 01 00 00 00 00 02 99 02
+					15 01 00 00 00 00 02 9a e9
+					15 01 00 00 00 00 02 9b 03
+					15 01 00 00 00 00 02 9c 16
+					15 01 00 00 00 00 02 9d 03
+					15 01 00 00 00 00 02 9e 34
+					15 01 00 00 00 00 02 9f 03
+					15 01 00 00 00 00 02 a0 56
+					15 01 00 00 00 00 02 a2 03
+					15 01 00 00 00 00 02 a3 62
+					15 01 00 00 00 00 02 a4 03
+					15 01 00 00 00 00 02 a5 6c
+					15 01 00 00 00 00 02 a6 03
+					15 01 00 00 00 00 02 a7 74
+					15 01 00 00 00 00 02 a9 03
+					15 01 00 00 00 00 02 aa 80
+					15 01 00 00 00 00 02 ab 03
+					15 01 00 00 00 00 02 ac 89
+					15 01 00 00 00 00 02 ad 03
+					15 01 00 00 00 00 02 ae 8b
+					15 01 00 00 00 00 02 af 03
+					15 01 00 00 00 00 02 b0 8d
+					15 01 00 00 00 00 02 b1 03
+					15 01 00 00 00 00 02 b2 8e
+					15 01 00 00 00 00 02 b3 00
+					15 01 00 00 00 00 02 b4 71
+					15 01 00 00 00 00 02 b5 00
+					15 01 00 00 00 00 02 b6 84
+					15 01 00 00 00 00 02 b7 00
+					15 01 00 00 00 00 02 b8 a5
+					15 01 00 00 00 00 02 b9 00
+					15 01 00 00 00 00 02 ba bb
+					15 01 00 00 00 00 02 bb 00
+					15 01 00 00 00 00 02 bc ce
+					15 01 00 00 00 00 02 bd 00
+					15 01 00 00 00 00 02 be e0
+					15 01 00 00 00 00 02 bf 00
+					15 01 00 00 00 00 02 c0 ef
+					15 01 00 00 00 00 02 c1 00
+					15 01 00 00 00 00 02 c2 ff
+					15 01 00 00 00 00 02 c3 01
+					15 01 00 00 00 00 02 c4 0b
+					15 01 00 00 00 00 02 c5 01
+					15 01 00 00 00 00 02 c6 38
+					15 01 00 00 00 00 02 c7 01
+					15 01 00 00 00 00 02 c8 5b
+					15 01 00 00 00 00 02 c9 01
+					15 01 00 00 00 00 02 ca 95
+					15 01 00 00 00 00 02 cb 01
+					15 01 00 00 00 00 02 cc c4
+					15 01 00 00 00 00 02 cd 02
+					15 01 00 00 00 00 02 ce 0d
+					15 01 00 00 00 00 02 cf 02
+					15 01 00 00 00 00 02 d0 4a
+					15 01 00 00 00 00 02 d1 02
+					15 01 00 00 00 00 02 d2 4c
+					15 01 00 00 00 00 02 d3 02
+					15 01 00 00 00 00 02 d4 85
+					15 01 00 00 00 00 02 d5 02
+					15 01 00 00 00 00 02 d6 c3
+					15 01 00 00 00 00 02 d7 02
+					15 01 00 00 00 00 02 d8 e9
+					15 01 00 00 00 00 02 d9 03
+					15 01 00 00 00 00 02 da 16
+					15 01 00 00 00 00 02 db 03
+					15 01 00 00 00 00 02 dc 34
+					15 01 00 00 00 00 02 dd 03
+					15 01 00 00 00 00 02 de 56
+					15 01 00 00 00 00 02 df 03
+					15 01 00 00 00 00 02 e0 62
+					15 01 00 00 00 00 02 e1 03
+					15 01 00 00 00 00 02 e2 6c
+					15 01 00 00 00 00 02 e3 03
+					15 01 00 00 00 00 02 e4 74
+					15 01 00 00 00 00 02 e5 03
+					15 01 00 00 00 00 02 e6 80
+					15 01 00 00 00 00 02 e7 03
+					15 01 00 00 00 00 02 e8 89
+					15 01 00 00 00 00 02 e9 03
+					15 01 00 00 00 00 02 ea 8b
+					15 01 00 00 00 00 02 eb 03
+					15 01 00 00 00 00 02 ec 8d
+					15 01 00 00 00 00 02 ed 03
+					15 01 00 00 00 00 02 ee 8e
+					15 01 00 00 00 00 02 ef 00
+					15 01 00 00 00 00 02 f0 71
+					15 01 00 00 00 00 02 f1 00
+					15 01 00 00 00 00 02 f2 84
+					15 01 00 00 00 00 02 f3 00
+					15 01 00 00 00 00 02 f4 a5
+					15 01 00 00 00 00 02 f5 00
+					15 01 00 00 00 00 02 f6 bb
+					15 01 00 00 00 00 02 f7 00
+					15 01 00 00 00 00 02 f8 ce
+					15 01 00 00 00 00 02 f9 00
+					15 01 00 00 00 00 02 fa e0
+					15 01 00 00 00 00 02 ff 21
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 00
+					15 01 00 00 00 00 02 01 ef
+					15 01 00 00 00 00 02 02 00
+					15 01 00 00 00 00 02 03 ff
+					15 01 00 00 00 00 02 04 01
+					15 01 00 00 00 00 02 05 0b
+					15 01 00 00 00 00 02 06 01
+					15 01 00 00 00 00 02 07 38
+					15 01 00 00 00 00 02 08 01
+					15 01 00 00 00 00 02 09 5b
+					15 01 00 00 00 00 02 0a 01
+					15 01 00 00 00 00 02 0b 95
+					15 01 00 00 00 00 02 0c 01
+					15 01 00 00 00 00 02 0d c4
+					15 01 00 00 00 00 02 0e 02
+					15 01 00 00 00 00 02 0f 0d
+					15 01 00 00 00 00 02 10 02
+					15 01 00 00 00 00 02 11 4a
+					15 01 00 00 00 00 02 12 02
+					15 01 00 00 00 00 02 13 4c
+					15 01 00 00 00 00 02 14 02
+					15 01 00 00 00 00 02 15 85
+					15 01 00 00 00 00 02 16 02
+					15 01 00 00 00 00 02 17 c3
+					15 01 00 00 00 00 02 18 02
+					15 01 00 00 00 00 02 19 e9
+					15 01 00 00 00 00 02 1a 03
+					15 01 00 00 00 00 02 1b 16
+					15 01 00 00 00 00 02 1c 03
+					15 01 00 00 00 00 02 1d 34
+					15 01 00 00 00 00 02 1e 03
+					15 01 00 00 00 00 02 1f 56
+					15 01 00 00 00 00 02 20 03
+					15 01 00 00 00 00 02 21 62
+					15 01 00 00 00 00 02 22 03
+					15 01 00 00 00 00 02 23 6c
+					15 01 00 00 00 00 02 24 03
+					15 01 00 00 00 00 02 25 74
+					15 01 00 00 00 00 02 26 03
+					15 01 00 00 00 00 02 27 80
+					15 01 00 00 00 00 02 28 03
+					15 01 00 00 00 00 02 29 89
+					15 01 00 00 00 00 02 2a 03
+					15 01 00 00 00 00 02 2b 8b
+					15 01 00 00 00 00 02 2d 03
+					15 01 00 00 00 00 02 2f 8d
+					15 01 00 00 00 00 02 30 03
+					15 01 00 00 00 00 02 31 8e
+					15 01 00 00 00 00 02 32 00
+					15 01 00 00 00 00 02 33 71
+					15 01 00 00 00 00 02 34 00
+					15 01 00 00 00 00 02 35 84
+					15 01 00 00 00 00 02 36 00
+					15 01 00 00 00 00 02 37 a5
+					15 01 00 00 00 00 02 38 00
+					15 01 00 00 00 00 02 39 bb
+					15 01 00 00 00 00 02 3a 00
+					15 01 00 00 00 00 02 3b ce
+					15 01 00 00 00 00 02 3d 00
+					15 01 00 00 00 00 02 3f e0
+					15 01 00 00 00 00 02 40 00
+					15 01 00 00 00 00 02 41 ef
+					15 01 00 00 00 00 02 42 00
+					15 01 00 00 00 00 02 43 ff
+					15 01 00 00 00 00 02 44 01
+					15 01 00 00 00 00 02 45 0b
+					15 01 00 00 00 00 02 46 01
+					15 01 00 00 00 00 02 47 38
+					15 01 00 00 00 00 02 48 01
+					15 01 00 00 00 00 02 49 5b
+					15 01 00 00 00 00 02 4a 01
+					15 01 00 00 00 00 02 4b 95
+					15 01 00 00 00 00 02 4c 01
+					15 01 00 00 00 00 02 4d c4
+					15 01 00 00 00 00 02 4e 02
+					15 01 00 00 00 00 02 4f 0d
+					15 01 00 00 00 00 02 50 02
+					15 01 00 00 00 00 02 51 4a
+					15 01 00 00 00 00 02 52 02
+					15 01 00 00 00 00 02 53 4c
+					15 01 00 00 00 00 02 54 02
+					15 01 00 00 00 00 02 55 85
+					15 01 00 00 00 00 02 56 02
+					15 01 00 00 00 00 02 58 c3
+					15 01 00 00 00 00 02 59 02
+					15 01 00 00 00 00 02 5a e9
+					15 01 00 00 00 00 02 5b 03
+					15 01 00 00 00 00 02 5c 16
+					15 01 00 00 00 00 02 5d 03
+					15 01 00 00 00 00 02 5e 34
+					15 01 00 00 00 00 02 5f 03
+					15 01 00 00 00 00 02 60 56
+					15 01 00 00 00 00 02 61 03
+					15 01 00 00 00 00 02 62 62
+					15 01 00 00 00 00 02 63 03
+					15 01 00 00 00 00 02 64 6c
+					15 01 00 00 00 00 02 65 03
+					15 01 00 00 00 00 02 66 74
+					15 01 00 00 00 00 02 67 03
+					15 01 00 00 00 00 02 68 80
+					15 01 00 00 00 00 02 69 03
+					15 01 00 00 00 00 02 6a 89
+					15 01 00 00 00 00 02 6b 03
+					15 01 00 00 00 00 02 6c 8b
+					15 01 00 00 00 00 02 6d 03
+					15 01 00 00 00 00 02 6e 8d
+					15 01 00 00 00 00 02 6f 03
+					15 01 00 00 00 00 02 70 8e
+					15 01 00 00 00 00 02 71 00
+					15 01 00 00 00 00 02 72 71
+					15 01 00 00 00 00 02 73 00
+					15 01 00 00 00 00 02 74 84
+					15 01 00 00 00 00 02 75 00
+					15 01 00 00 00 00 02 76 a5
+					15 01 00 00 00 00 02 77 00
+					15 01 00 00 00 00 02 78 bb
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 7a ce
+					15 01 00 00 00 00 02 7b 00
+					15 01 00 00 00 00 02 7c e0
+					15 01 00 00 00 00 02 7d 00
+					15 01 00 00 00 00 02 7e ef
+					15 01 00 00 00 00 02 7f 00
+					15 01 00 00 00 00 02 80 ff
+					15 01 00 00 00 00 02 81 01
+					15 01 00 00 00 00 02 82 0b
+					15 01 00 00 00 00 02 83 01
+					15 01 00 00 00 00 02 84 38
+					15 01 00 00 00 00 02 85 01
+					15 01 00 00 00 00 02 86 5b
+					15 01 00 00 00 00 02 87 01
+					15 01 00 00 00 00 02 88 95
+					15 01 00 00 00 00 02 89 01
+					15 01 00 00 00 00 02 8a c4
+					15 01 00 00 00 00 02 8b 02
+					15 01 00 00 00 00 02 8c 0d
+					15 01 00 00 00 00 02 8d 02
+					15 01 00 00 00 00 02 8e 4a
+					15 01 00 00 00 00 02 8f 02
+					15 01 00 00 00 00 02 90 4c
+					15 01 00 00 00 00 02 91 02
+					15 01 00 00 00 00 02 92 85
+					15 01 00 00 00 00 02 93 02
+					15 01 00 00 00 00 02 94 c3
+					15 01 00 00 00 00 02 95 02
+					15 01 00 00 00 00 02 96 e9
+					15 01 00 00 00 00 02 97 03
+					15 01 00 00 00 00 02 98 16
+					15 01 00 00 00 00 02 99 03
+					15 01 00 00 00 00 02 9a 34
+					15 01 00 00 00 00 02 9b 03
+					15 01 00 00 00 00 02 9c 56
+					15 01 00 00 00 00 02 9d 03
+					15 01 00 00 00 00 02 9e 62
+					15 01 00 00 00 00 02 9f 03
+					15 01 00 00 00 00 02 a0 6c
+					15 01 00 00 00 00 02 a2 03
+					15 01 00 00 00 00 02 a3 74
+					15 01 00 00 00 00 02 a4 03
+					15 01 00 00 00 00 02 a5 80
+					15 01 00 00 00 00 02 a6 03
+					15 01 00 00 00 00 02 a7 89
+					15 01 00 00 00 00 02 a9 03
+					15 01 00 00 00 00 02 aa 8b
+					15 01 00 00 00 00 02 ab 03
+					15 01 00 00 00 00 02 ac 8d
+					15 01 00 00 00 00 02 ad 03
+					15 01 00 00 00 00 02 ae 8e
+					15 01 00 00 00 00 02 af 00
+					15 01 00 00 00 00 02 b0 71
+					15 01 00 00 00 00 02 b1 00
+					15 01 00 00 00 00 02 b2 84
+					15 01 00 00 00 00 02 b3 00
+					15 01 00 00 00 00 02 b4 a5
+					15 01 00 00 00 00 02 b5 00
+					15 01 00 00 00 00 02 b6 bb
+					15 01 00 00 00 00 02 b7 00
+					15 01 00 00 00 00 02 b8 ce
+					15 01 00 00 00 00 02 b9 00
+					15 01 00 00 00 00 02 ba e0
+					15 01 00 00 00 00 02 bb 00
+					15 01 00 00 00 00 02 bc ef
+					15 01 00 00 00 00 02 bd 00
+					15 01 00 00 00 00 02 be ff
+					15 01 00 00 00 00 02 bf 01
+					15 01 00 00 00 00 02 c0 0b
+					15 01 00 00 00 00 02 c1 01
+					15 01 00 00 00 00 02 c2 38
+					15 01 00 00 00 00 02 c3 01
+					15 01 00 00 00 00 02 c4 5b
+					15 01 00 00 00 00 02 c5 01
+					15 01 00 00 00 00 02 c6 95
+					15 01 00 00 00 00 02 c7 01
+					15 01 00 00 00 00 02 c8 c4
+					15 01 00 00 00 00 02 c9 02
+					15 01 00 00 00 00 02 ca 0d
+					15 01 00 00 00 00 02 cb 02
+					15 01 00 00 00 00 02 cc 4a
+					15 01 00 00 00 00 02 cd 02
+					15 01 00 00 00 00 02 ce 4c
+					15 01 00 00 00 00 02 cf 02
+					15 01 00 00 00 00 02 d0 85
+					15 01 00 00 00 00 02 d1 02
+					15 01 00 00 00 00 02 d2 c3
+					15 01 00 00 00 00 02 d3 02
+					15 01 00 00 00 00 02 d4 e9
+					15 01 00 00 00 00 02 d5 03
+					15 01 00 00 00 00 02 d6 16
+					15 01 00 00 00 00 02 d7 03
+					15 01 00 00 00 00 02 d8 34
+					15 01 00 00 00 00 02 d9 03
+					15 01 00 00 00 00 02 da 56
+					15 01 00 00 00 00 02 db 03
+					15 01 00 00 00 00 02 dc 62
+					15 01 00 00 00 00 02 dd 03
+					15 01 00 00 00 00 02 de 6c
+					15 01 00 00 00 00 02 df 03
+					15 01 00 00 00 00 02 e0 74
+					15 01 00 00 00 00 02 e1 03
+					15 01 00 00 00 00 02 e2 80
+					15 01 00 00 00 00 02 e3 03
+					15 01 00 00 00 00 02 e4 89
+					15 01 00 00 00 00 02 e5 03
+					15 01 00 00 00 00 02 e6 8b
+					15 01 00 00 00 00 02 e7 03
+					15 01 00 00 00 00 02 e8 8d
+					15 01 00 00 00 00 02 e9 03
+					15 01 00 00 00 00 02 ea 8e
+					15 01 00 00 00 00 02 ff 10
+					05 01 00 00 00 00 01 29];
+				qcom,mdss-dsi-off-command =
+					[15 01 00 00 00 00 02 ff 10
+					05 01 00 00 10 00 01 28
+					15 01 00 00 00 00 02 b0 00
+					05 01 00 00 40 00 01 10
+					15 01 00 00 00 00 02 4f 01];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
new file mode 100644
index 0000000..6fe1a3f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-cmd.dtsi
@@ -0,0 +1,218 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sim_cmd: qcom,mdss_dsi_sim_cmd {
+		qcom,mdss-dsi-panel-name = "Simulator cmd mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
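+		/* DSI PHY clk-post/clk-pre timing counts. */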
+		qcom,mdss-dsi-t-clk-post = <0x03>;
+		qcom,mdss-dsi-t-clk-pre = <0x27>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
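+		/* Simulator panel: a watchdog timer emulates the TE signal
+		 * and panel acks are not expected.
+		 */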
+		qcom,mdss-dsi-te-using-wd;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+		qcom,panel-ack-disabled;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1440>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <100>;
+				qcom,mdss-dsi-h-pulse-width = <40>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <100>;
+				qcom,mdss-dsi-v-front-porch = <100>;
+				qcom,mdss-dsi-v-pulse-width = <40>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
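+				/* 12-byte DSI PHY timing block. */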
+				qcom,mdss-dsi-panel-timings =
+					[00 21 09 09 24 23 08 08 08 03 04 00];
+				qcom,mdss-dsi-on-command =
+					[29 01 00 00 00 00 02 b0 03
+					05 01 00 00 0a 00 01 00
+					/* Soft reset, wait 10ms */
+					15 01 00 00 0a 00 02 3a 77
+					/* Set Pixel format (24 bpp) */
+					39 01 00 00 0a 00 05 2a 00 00 04 ff
+					/* Set Column address */
+					39 01 00 00 0a 00 05 2b 00 00 05 9f
+					/* Set page address */
+					15 01 00 00 0a 00 02 35 00
+					/* Set tear on */
+					39 01 00 00 0a 00 03 44 00 00
+					/* Set tear scan line */
+					15 01 00 00 0a 00 02 51 ff
+					/* write display brightness */
+					15 01 00 00 0a 00 02 53 24
+					/* write control display */
+					15 01 00 00 0a 00 02 55 00
+					/* CABC brightness */
+					05 01 00 00 78 00 01 11
+					/* exit sleep mode, wait 120ms */
+					05 01 00 00 10 00 01 29];
+					/* Set display on, wait 16ms */
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <40>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+
+			timing@1 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <460>;
+				qcom,mdss-dsi-h-pulse-width = <40>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <100>;
+				qcom,mdss-dsi-v-front-porch = <740>;
+				qcom,mdss-dsi-v-pulse-width = <40>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-timings =
+					[00 21 09 09 24 23 08 08 08 03 04 00];
+				qcom,mdss-dsi-on-command =
+					[29 01 00 00 00 00 02 b0 03
+					05 01 00 00 0a 00 01 00
+					/* Soft reset, wait 10ms */
+					15 01 00 00 0a 00 02 3a 77
+					/* Set Pixel format (24 bpp) */
+					39 01 00 00 0a 00 05 2a 00 00 04 ff
+					/* Set Column address */
+					39 01 00 00 0a 00 05 2b 00 00 05 9f
+					/* Set page address */
+					15 01 00 00 0a 00 02 35 00
+					/* Set tear on */
+					39 01 00 00 0a 00 03 44 00 00
+					/* Set tear scan line */
+					15 01 00 00 0a 00 02 51 ff
+					/* write display brightness */
+					15 01 00 00 0a 00 02 53 24
+					/* write control display */
+					15 01 00 00 0a 00 02 55 00
+					/* CABC brightness */
+					05 01 00 00 78 00 01 11
+					/* exit sleep mode, wait 120ms */
+					05 01 00 00 10 00 01 29];
+					/* Set display on, wait 16ms */
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <40>;
+				qcom,mdss-dsc-slice-width = <540>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+
+			timing@2 {
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <1280>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <840>;
+				qcom,mdss-dsi-h-pulse-width = <40>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <100>;
+				qcom,mdss-dsi-v-front-porch = <1380>;
+				qcom,mdss-dsi-v-pulse-width = <40>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-timings =
+					[00 21 09 09 24 23 08 08 08 03 04 00];
+				qcom,mdss-dsi-on-command =
+					[29 01 00 00 00 00 02 b0 03
+					05 01 00 00 0a 00 01 00
+					/* Soft reset, wait 10ms */
+					15 01 00 00 0a 00 02 3a 77
+					/* Set Pixel format (24 bpp) */
+					39 01 00 00 0a 00 05 2a 00 00 04 ff
+					/* Set Column address */
+					39 01 00 00 0a 00 05 2b 00 00 05 9f
+					/* Set page address */
+					15 01 00 00 0a 00 02 35 00
+					/* Set tear on */
+					39 01 00 00 0a 00 03 44 00 00
+					/* Set tear scan line */
+					15 01 00 00 0a 00 02 51 ff
+					/* write display brightness */
+					15 01 00 00 0a 00 02 53 24
+					/* write control display */
+					15 01 00 00 0a 00 02 55 00
+					/* CABC brightness */
+					05 01 00 00 78 00 01 11
+					/* exit sleep mode, wait 120ms */
+					05 01 00 00 10 00 01 29];
+					/* Set display on, wait 16ms */
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <40>;
+				qcom,mdss-dsc-slice-width = <360>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dsc375-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dsc375-cmd.dtsi
new file mode 100644
index 0000000..dbfe2f2
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dsc375-cmd.dtsi
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sim_dsc_375_cmd: qcom,mdss_dsi_sim_dsc_375_cmd {
+		qcom,mdss-dsi-panel-name =
+			"Simulator cmd mode DSC 3.75:1 dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,panel-ack-disabled;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <1440>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <32>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <8>;
+				qcom,mdss-dsi-v-front-porch = <10>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-on-command = [
+					/* CMD2_P0 */
+					15 01 00 00 00 00 02 ff 20
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 05 40
+					15 01 00 00 00 00 02 06 19
+					15 01 00 00 00 00 02 07 1e
+					15 01 00 00 00 00 02 0b 73
+					15 01 00 00 00 00 02 0c 73
+					15 01 00 00 00 00 02 0e b0
+					15 01 00 00 00 00 02 0f ae
+					15 01 00 00 00 00 02 11 b8
+					15 01 00 00 00 00 02 13 00
+					15 01 00 00 00 00 02 58 80
+					15 01 00 00 00 00 02 59 01
+					15 01 00 00 00 00 02 5a 00
+					15 01 00 00 00 00 02 5b 01
+					15 01 00 00 00 00 02 5c 80
+					15 01 00 00 00 00 02 5d 81
+					15 01 00 00 00 00 02 5e 00
+					15 01 00 00 00 00 02 5f 01
+					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 68 03
+					/* CMD2_P4 */
+					15 01 00 00 00 00 02 ff 24
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 1c
+					15 01 00 00 00 00 02 01 0b
+					15 01 00 00 00 00 02 02 0c
+					15 01 00 00 00 00 02 03 01
+					15 01 00 00 00 00 02 04 0f
+					15 01 00 00 00 00 02 05 10
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 89
+					15 01 00 00 00 00 02 09 8a
+					15 01 00 00 00 00 02 0a 13
+					15 01 00 00 00 00 02 0b 13
+					15 01 00 00 00 00 02 0c 15
+					15 01 00 00 00 00 02 0d 15
+					15 01 00 00 00 00 02 0e 17
+					15 01 00 00 00 00 02 0f 17
+					15 01 00 00 00 00 02 10 1c
+					15 01 00 00 00 00 02 11 0b
+					15 01 00 00 00 00 02 12 0c
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 0f
+					15 01 00 00 00 00 02 15 10
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 89
+					15 01 00 00 00 00 02 19 8a
+					15 01 00 00 00 00 02 1a 13
+					15 01 00 00 00 00 02 1b 13
+					15 01 00 00 00 00 02 1c 15
+					15 01 00 00 00 00 02 1d 15
+					15 01 00 00 00 00 02 1e 17
+					15 01 00 00 00 00 02 1f 17
+					/* STV */
+					15 01 00 00 00 00 02 20 40
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6d
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					/* Vend */
+					15 01 00 00 00 00 02 e0 00
+					15 01 00 00 00 00 02 dc 21
+					15 01 00 00 00 00 02 dd 22
+					15 01 00 00 00 00 02 de 07
+					15 01 00 00 00 00 02 df 07
+					15 01 00 00 00 00 02 e3 6d
+					15 01 00 00 00 00 02 e1 07
+					15 01 00 00 00 00 02 e2 07
+					/* UD */
+					15 01 00 00 00 00 02 29 d8
+					15 01 00 00 00 00 02 2a 2a
+					/* CLK */
+					15 01 00 00 00 00 02 4b 03
+					15 01 00 00 00 00 02 4c 11
+					15 01 00 00 00 00 02 4d 10
+					15 01 00 00 00 00 02 4e 01
+					15 01 00 00 00 00 02 4f 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 55 25
+					/* Reset XDONB */
+					15 01 00 00 00 00 02 5b 43
+					15 01 00 00 00 00 02 5c 00
+					15 01 00 00 00 00 02 5f 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					/* Resolution: 1440x2560 */
+					15 01 00 00 00 00 02 72 02
+					/* mux */
+					15 01 00 00 00 00 02 7a 80
+					15 01 00 00 00 00 02 7b 91
+					15 01 00 00 00 00 02 7c d8
+					15 01 00 00 00 00 02 7d 60
+					15 01 00 00 00 00 02 7f 15
+					15 01 00 00 00 00 02 75 15
+					/* ABOFF */
+					15 01 00 00 00 00 02 b3 c0
+					15 01 00 00 00 00 02 b4 00
+					15 01 00 00 00 00 02 b5 00
+					/* Source EQ */
+					15 01 00 00 00 00 02 78 00
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 80 00
+					15 01 00 00 00 00 02 83 00
+					/* FP BP */
+					15 01 00 00 00 00 02 93 0a
+					15 01 00 00 00 00 02 94 0a
+					/* Inversion Type */
+					15 01 00 00 00 00 02 8a 00
+					15 01 00 00 00 00 02 9b ff
+					/* IMGSWAP = 1 when PortSwap = 1 */
+					15 01 00 00 00 00 02 9d b0
+					15 01 00 00 00 00 02 9f 63
+					15 01 00 00 00 00 02 98 10
+					/* FRM */
+					15 01 00 00 00 00 02 ec 00
+					/* CMD1 */
+					15 01 00 00 00 00 02 ff 10
+					/* VESA DSC PPS settings
+					 * (1440x2560, slice height 16)
+					 */
+					39 01 00 00 00 00 11 c1 09
+					20 00 10 02 00 02 68 01 bb
+					00 0a 06 67 04 c5
+
+					39 01 00 00 00 00 03 c2 10 f0
+					/* C0h: 0x00 = 2-port SDC,
+					 * 0x01 = 1-port A FBC, 0x02 = MTK,
+					 * 0x03 = 1-port A VESA
+					 */
+					15 01 00 00 00 00 02 c0 03
+					/* VBP+VSA, VFP = 10H */
+					15 01 00 00 00 00 04 3b 03 0a 0a
+					/* FTE on */
+					15 01 00 00 00 00 02 35 00
+					/* EN_BK =1(auto black) */
+					15 01 00 00 00 00 02 e5 01
+					/* CMD mode(10) VDO mode(03) */
+					15 01 00 00 00 00 02 bb 10
+					/* Non Reload MTP */
+					15 01 00 00 00 00 02 fb 01
+					/* SlpOut + DispOn */
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00
+					];
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+
+				qcom,mdss-dsi-on-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
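+				/* DSC 3.75:1: 10 bpc (30 bpp) coded at
+				 * 8 bpp; two 720-wide slices per packet.
+				 */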
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <16>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <2>;
+				qcom,mdss-dsc-bit-per-component = <10>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+
+			timing@1 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <0>;
+				qcom,mdss-dsi-h-back-porch = <0>;
+				qcom,mdss-dsi-h-pulse-width = <0>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <0>;
+				qcom,mdss-dsi-v-front-porch = <0>;
+				qcom,mdss-dsi-v-pulse-width = <0>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					15 01 00 00 00 00 02 bb 10
+					15 01 00 00 00 00 02 b0 03
+					05 01 00 00 78 00 01 11
+					15 01 00 00 00 00 02 51 ff
+					15 01 00 00 00 00 02 53 24
+					15 01 00 00 00 00 02 ff 23
+					15 01 00 00 00 00 02 08 05
+					15 01 00 00 00 00 02 46 90
+					15 01 00 00 00 00 02 ff 10
+					15 01 00 00 00 00 02 ff f0
+					15 01 00 00 00 00 02 92 01
+					15 01 00 00 00 00 02 ff 10
+					/* enable TE generation */
+					15 01 00 00 00 00 02 35 00
+					05 01 00 00 28 00 01 29];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 10 00 01 28
+					05 01 00 00 40 00 01 10];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <16>;
+				qcom,mdss-dsc-slice-width = <540>;
+				qcom,mdss-dsc-slice-per-pkt = <2>;
+				qcom,mdss-dsc-bit-per-component = <10>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
new file mode 100644
index 0000000..aa8c8da
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-cmd.dtsi
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_sim_cmd: qcom,mdss_dsi_dual_sim_cmd {
+		qcom,mdss-dsi-panel-name = "Sim dual cmd mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
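+		/* <min max idle> tuples: extra per-line idle time, keyed to
+		 * the width of the update region (see mdss-dsi binding).
+		 */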
+		qcom,mdss-dsi-hor-line-idle = <0 40 256>,
+						<40 120 128>,
+						<120 240 64>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,panel-ack-disabled;
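+		/* Qsync can stretch the vertical refresh down to 45 Hz. */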
+		qcom,mdss-dsi-qsync-min-refresh-rate = <45>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <540>;
+				qcom,mdss-dsi-panel-height = <1920>;
+				qcom,mdss-dsi-h-front-porch = <28>;
+				qcom,mdss-dsi-h-back-porch = <4>;
+				qcom,mdss-dsi-h-pulse-width = <4>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <12>;
+				qcom,mdss-dsi-v-front-porch = <12>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <120>;
+				qcom,mdss-dsi-on-command =
+					[/* exit sleep mode, wait 0ms */
+					05 01 00 00 00 00 01 29];
+					/* Set display on, wait 16ms */
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 00 00 02 28 00
+					05 01 00 00 00 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-qsync-on-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-on-commands-state =
+					"dsi_hs_mode";
+				qcom,mdss-dsi-qsync-off-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-off-commands-state =
+					"dsi_hs_mode";
+			};
+
+			timing@1 {
+				qcom,mdss-dsi-panel-width = <1280>;
+				qcom,mdss-dsi-panel-height = <1440>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <44>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <4>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <4>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command =
+					[/* exit sleep mode, wait 0ms */
+					05 01 00 00 00 00 01 29];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 00 00 02 28 00
+					05 01 00 00 00 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-qsync-on-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-on-commands-state =
+					"dsi_hs_mode";
+				qcom,mdss-dsi-qsync-off-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-off-commands-state =
+					"dsi_hs_mode";
+			};
+
+			timing@2 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <3840>;
+				qcom,mdss-dsi-h-front-porch = <30>;
+				qcom,mdss-dsi-h-back-porch = <100>;
+				qcom,mdss-dsi-h-pulse-width = <4>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <40>;
+				qcom,mdss-dsi-on-command =
+					[/* exit sleep mode, wait 0ms */
+					05 01 00 00 00 00 01 29];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 00 00 02 28 00
+					05 01 00 00 00 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-qsync-on-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-on-commands-state =
+					"dsi_hs_mode";
+				qcom,mdss-dsi-qsync-off-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-off-commands-state =
+					"dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-dsc375-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-dsc375-cmd.dtsi
new file mode 100644
index 0000000..1d1d318
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-dsc375-cmd.dtsi
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_sim_dsc_375_cmd: qcom,mdss_dsi_dual_sim_dsc_375_cmd {
+		qcom,mdss-dsi-panel-name =
+			"Sim dual cmd mode DSC 3.75:1 dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,cmd-sync-wait-broadcast;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-hor-line-idle = <0 40 256>,
+						<40 120 128>,
+						<120 240 64>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-wd;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,panel-ack-disabled;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <3840>;
+				qcom,mdss-dsi-h-front-porch = <30>;
+				qcom,mdss-dsi-h-back-porch = <100>;
+				qcom,mdss-dsi-h-pulse-width = <4>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+
+				qcom,mdss-dsi-on-command = [
+					39 01 00 00 00 00 11 91 09 20 00 20 02
+					00 03 1c 04 21 00
+					0f 03 19 01 97
+					39 01 00 00 00 00 03 92 10 f0
+					15 01 00 00 00 00 02 90 03
+					15 01 00 00 00 00 02 03 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 04
+					15 01 00 00 00 00 02 c0 03
+					39 01 00 00 00 00 06 f0 55 aa 52 08 07
+					15 01 00 00 00 00 02 ef 01
+					39 01 00 00 00 00 06 f0 55 aa 52 08 00
+					15 01 00 00 00 00 02 b4 01
+					15 01 00 00 00 00 02 35 00
+					39 01 00 00 00 00 06 f0 55 aa 52 08 01
+					39 01 00 00 00 00 05 ff aa 55 a5 80
+					15 01 00 00 00 00 02 6f 01
+					15 01 00 00 00 00 02 f3 10
+					39 01 00 00 00 00 05 ff aa 55 a5 00
+					/* sleep out + delay 120ms */
+					05 01 00 00 78 00 01 11
+					/* display on + delay 120ms */
+					05 01 00 00 78 00 01 29
+					];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 78 00 02 28 00
+					 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <32>;
+				qcom,mdss-dsc-slice-width = <1080>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <10>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+
+			timing@1 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <2560>;
+				qcom,mdss-dsi-h-front-porch = <100>;
+				qcom,mdss-dsi-h-back-porch = <32>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <7>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-on-command = [
+					/* CMD2_P0 */
+					15 01 00 00 00 00 02 FF 20
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 01
+					15 01 00 00 00 00 02 01 55
+					15 01 00 00 00 00 02 02 45
+					15 01 00 00 00 00 02 05 40
+					15 01 00 00 00 00 02 06 19
+					15 01 00 00 00 00 02 07 1E
+					15 01 00 00 00 00 02 0B 73
+					15 01 00 00 00 00 02 0C 73
+					15 01 00 00 00 00 02 0E B0
+					15 01 00 00 00 00 02 0F AE
+					15 01 00 00 00 00 02 11 B8
+					15 01 00 00 00 00 02 13 00
+					15 01 00 00 00 00 02 58 80
+					15 01 00 00 00 00 02 59 01
+					15 01 00 00 00 00 02 5A 00
+					15 01 00 00 00 00 02 5B 01
+					15 01 00 00 00 00 02 5C 80
+					15 01 00 00 00 00 02 5D 81
+					15 01 00 00 00 00 02 5E 00
+					15 01 00 00 00 00 02 5F 01
+					15 01 00 00 00 00 02 72 31
+					15 01 00 00 00 00 02 68 03
+					/* CMD2_P4 */
+					15 01 00 00 00 00 02 ff 24
+					15 01 00 00 00 00 02 fb 01
+					15 01 00 00 00 00 02 00 1C
+					15 01 00 00 00 00 02 01 0B
+					15 01 00 00 00 00 02 02 0C
+					15 01 00 00 00 00 02 03 01
+					15 01 00 00 00 00 02 04 0F
+					15 01 00 00 00 00 02 05 10
+					15 01 00 00 00 00 02 06 10
+					15 01 00 00 00 00 02 07 10
+					15 01 00 00 00 00 02 08 89
+					15 01 00 00 00 00 02 09 8A
+					15 01 00 00 00 00 02 0A 13
+					15 01 00 00 00 00 02 0B 13
+					15 01 00 00 00 00 02 0C 15
+					15 01 00 00 00 00 02 0D 15
+					15 01 00 00 00 00 02 0E 17
+					15 01 00 00 00 00 02 0F 17
+					15 01 00 00 00 00 02 10 1C
+					15 01 00 00 00 00 02 11 0B
+					15 01 00 00 00 00 02 12 0C
+					15 01 00 00 00 00 02 13 01
+					15 01 00 00 00 00 02 14 0F
+					15 01 00 00 00 00 02 15 10
+					15 01 00 00 00 00 02 16 10
+					15 01 00 00 00 00 02 17 10
+					15 01 00 00 00 00 02 18 89
+					15 01 00 00 00 00 02 19 8A
+					15 01 00 00 00 00 02 1A 13
+					15 01 00 00 00 00 02 1B 13
+					15 01 00 00 00 00 02 1C 15
+					15 01 00 00 00 00 02 1D 15
+					15 01 00 00 00 00 02 1E 17
+					15 01 00 00 00 00 02 1F 17
+					/* STV */
+					15 01 00 00 00 00 02 20 40
+					15 01 00 00 00 00 02 21 01
+					15 01 00 00 00 00 02 22 00
+					15 01 00 00 00 00 02 23 40
+					15 01 00 00 00 00 02 24 40
+					15 01 00 00 00 00 02 25 6D
+					15 01 00 00 00 00 02 26 40
+					15 01 00 00 00 00 02 27 40
+					/* Vend */
+					15 01 00 00 00 00 02 E0 00
+					15 01 00 00 00 00 02 DC 21
+					15 01 00 00 00 00 02 DD 22
+					15 01 00 00 00 00 02 DE 07
+					15 01 00 00 00 00 02 DF 07
+					15 01 00 00 00 00 02 E3 6D
+					15 01 00 00 00 00 02 E1 07
+					15 01 00 00 00 00 02 E2 07
+					/* UD */
+					15 01 00 00 00 00 02 29 D8
+					15 01 00 00 00 00 02 2A 2A
+					/* CLK */
+					15 01 00 00 00 00 02 4B 03
+					15 01 00 00 00 00 02 4C 11
+					15 01 00 00 00 00 02 4D 10
+					15 01 00 00 00 00 02 4E 01
+					15 01 00 00 00 00 02 4F 01
+					15 01 00 00 00 00 02 50 10
+					15 01 00 00 00 00 02 51 00
+					15 01 00 00 00 00 02 52 80
+					15 01 00 00 00 00 02 53 00
+					15 01 00 00 00 00 02 56 00
+					15 01 00 00 00 00 02 54 07
+					15 01 00 00 00 00 02 58 07
+					15 01 00 00 00 00 02 55 25
+					/* Reset XDONB */
+					15 01 00 00 00 00 02 5B 43
+					15 01 00 00 00 00 02 5C 00
+					15 01 00 00 00 00 02 5F 73
+					15 01 00 00 00 00 02 60 73
+					15 01 00 00 00 00 02 63 22
+					15 01 00 00 00 00 02 64 00
+					15 01 00 00 00 00 02 67 08
+					15 01 00 00 00 00 02 68 04
+					/* Resolution: 1440x2560 */
+					15 01 00 00 00 00 02 72 02
+					/* mux */
+					15 01 00 00 00 00 02 7A 80
+					15 01 00 00 00 00 02 7B 91
+					15 01 00 00 00 00 02 7C D8
+					15 01 00 00 00 00 02 7D 60
+					15 01 00 00 00 00 02 7F 15
+					15 01 00 00 00 00 02 75 15
+					/* ABOFF */
+					15 01 00 00 00 00 02 B3 C0
+					15 01 00 00 00 00 02 B4 00
+					15 01 00 00 00 00 02 B5 00
+					/* Source EQ */
+					15 01 00 00 00 00 02 78 00
+					15 01 00 00 00 00 02 79 00
+					15 01 00 00 00 00 02 80 00
+					15 01 00 00 00 00 02 83 00
+					/* FP BP */
+					15 01 00 00 00 00 02 93 0A
+					15 01 00 00 00 00 02 94 0A
+					/* Inversion Type */
+					15 01 00 00 00 00 02 8A 00
+					15 01 00 00 00 00 02 9B FF
+					/* IMGSWAP = 1 when PortSwap = 1 */
+					15 01 00 00 00 00 02 9D B0
+					15 01 00 00 00 00 02 9F 63
+					15 01 00 00 00 00 02 98 10
+					/* FRM */
+					15 01 00 00 00 00 02 EC 00
+					/* CMD1 */
+					15 01 00 00 00 00 02 ff 10
+					/* VBP+VSA = 03h, VFP = 0Ah */
+					15 01 00 00 00 00 04 3B 03 0A 0A
+					/* FTE on */
+					15 01 00 00 00 00 02 35 00
+					/* EN_BK =1(auto black) */
+					15 01 00 00 00 00 02 E5 01
+					/* CMD mode(10) VDO mode(03) */
+					15 01 00 00 00 00 02 BB 10
+					/* Non Reload MTP */
+					15 01 00 00 00 00 02 FB 01
+					/* SlpOut + DispOn */
+					05 01 00 00 78 00 02 11 00
+					05 01 00 00 78 00 02 29 00
+					];
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 78 00 02 28 00
+					 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <16>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <10>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
new file mode 100644
index 0000000..a8fa762
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-dualmipi-video.dtsi
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_dual_sim_vid: qcom,mdss_dsi_dual_sim_video {
+		qcom,mdss-dsi-panel-name = "Sim dual video mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-panel-broadcast-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 20>, <0 200>, <1 20>;
+		qcom,panel-ack-disabled;
+		qcom,mdss-dsi-qsync-min-refresh-rate = <45>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1280>;
+				qcom,mdss-dsi-panel-height = <1440>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <44>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <4>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <4>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 32 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-qsync-on-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-on-commands-state =
+					"dsi_hs_mode";
+				qcom,mdss-dsi-qsync-off-commands =
+					[15 01 00 00 00 00 02 51 00];
+				qcom,mdss-dsi-qsync-off-commands-state =
+					"dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-sec-hd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-sec-hd-cmd.dtsi
new file mode 100644
index 0000000..401e668
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-sec-hd-cmd.dtsi
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sim_sec_hd_cmd: qcom,mdss_dsi_sim_sec_hd_cmd {
+		qcom,mdss-dsi-panel-name =
+				"sim hd command mode secondary dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "burst_mode";
+		qcom,panel-ack-disabled;
+		qcom,mdss-dsi-te-using-wd;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-post-init-delay = <1>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <720>;
+				qcom,mdss-dsi-panel-height = <1280>;
+				qcom,mdss-dsi-h-front-porch = <120>;
+				qcom,mdss-dsi-h-back-porch = <60>;
+				qcom,mdss-dsi-h-pulse-width = <12>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <2>;
+				qcom,mdss-dsi-v-front-porch = <12>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+
+				qcom,mdss-dsi-on-command = [
+					/* sleep out + delay 120ms */
+					05 01 00 00 78 00 01 11
+					/* display on + delay 120ms */
+					05 01 00 00 78 00 01 29
+					];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 78 00 02 28 00
+					05 01 00 00 78 00 02 10 00
+					];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
new file mode 100644
index 0000000..d23cc63
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sim-video.dtsi
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sim_vid: qcom,mdss_dsi_sim_video {
+		qcom,mdss-dsi-panel-name = "Simulator video mode dsi panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-t-clk-post = <0x04>;
+		qcom,mdss-dsi-t-clk-pre = <0x1b>;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 0>, <0 0>, <1 0>;
+		qcom,panel-ack-disabled;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <640>;
+				qcom,mdss-dsi-panel-height = <480>;
+				qcom,mdss-dsi-h-front-porch = <8>;
+				qcom,mdss-dsi-h-back-porch = <8>;
+				qcom,mdss-dsi-h-pulse-width = <8>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <6>;
+				qcom,mdss-dsi-v-front-porch = <6>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-timings =
+					[00 00 00 00 00 00 00 00 00 00 00 00];
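+				/*
+				 * 32h/22h are the DSI turn-on/shutdown
+				 * peripheral packets; the simulator panel
+				 * presumably needs no DCS init sequence.
+				 */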
+				qcom,mdss-dsi-on-command =
+					[32 01 00 00 00 00 02 00 00];
+				qcom,mdss-dsi-off-command =
+					[22 01 00 00 00 00 02 00 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi
new file mode 100644
index 0000000..335836a
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-fhd-plus-cmd.dtsi
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sw43404_amoled_fhd_plus_cmd: qcom,mdss_dsi_sw43404_fhd_plus_cmd {
+		qcom,mdss-dsi-panel-name =
+		  "sw43404 amoled boe fhd+ panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <1023>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-pan-physical-width-dimension = <68>;
+		qcom,mdss-pan-physical-height-dimension = <138>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <2160>;
+				qcom,mdss-dsi-h-front-porch = <160>;
+				qcom,mdss-dsi-h-back-porch = <72>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-v-back-porch = <8>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-jitter = <0x1 0x1>;
+				qcom,mdss-dsi-on-command = [
+				  39 01 00 00 00 00 03 b0 a5 00
+				  07 01 00 00 00 00 02 01 00
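+				  /* 0ah = DSI Picture Parameter Set: the
+				   * 128-byte DSC PPS for this mode; the 07h
+				   * packet above is assumed to be the DSI
+				   * compression-mode command enabling DSC.
+				   */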
+				  0a 01 00 00 00 00 80 11 00 00 89 30 80
+				     08 70 04 38 02 1c 02 1c 02 1c 02 00
+				     02 0e 00 20 34 29 00 07 00 0C 00 2e
+				     00 31 18 00 10 F0 03 0C 20 00 06 0B
+				     0B 33 0E 1C 2A 38 46 54 62 69 70 77
+				     79 7B 7D 7E 01 02 01 00 09 40 09 BE
+				     19 FC 19 FA 19 F8 1A 38 1A 78 1A B6
+				     2A F6 2B 34 2B 74 3B 74 6B F4 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00
+				     00 00
+				  39 01 00 00 00 00 03 b0 a5 00
+				  15 01 00 00 00 00 02 5e 10
+				  39 01 00 00 00 00 06 b9 bf 11 40 00 30
+				  39 01 00 00 00 00 09 F8 00 08 10 08 2D
+				     00 00 2D
+				  15 01 00 00 00 00 02 55 0c
+				  05 01 00 00 1e 00 02 11 00
+				  15 01 00 00 78 00 02 3d 01
+				  39 01 00 00 00 00 03 b0 a5 00
+				  05 01 00 00 78 00 02 35 00
+				  05 01 00 00 3c 00 02 29 00
+				];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 14 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <540>;
+				qcom,mdss-dsc-slice-width = <540>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi
new file mode 100644
index 0000000..ee0dcb6
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-cmd.dtsi
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sw43404_amoled_cmd: qcom,mdss_dsi_sw43404_amoled_wqhd_cmd {
+		qcom,mdss-dsi-panel-name =
+			"sw43404 amoled cmd mode dsi boe panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-bl-max-level = <1023>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+		qcom,mdss-dsi-qsync-min-refresh-rate = <55>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-panel-width = <1440>;
+				qcom,mdss-dsi-panel-height = <2880>;
+				qcom,mdss-dsi-h-front-porch = <60>;
+				qcom,mdss-dsi-h-back-porch = <30>;
+				qcom,mdss-dsi-h-pulse-width = <12>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <8>;
+				qcom,mdss-dsi-v-front-porch = <8>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-h-right-border = <0>;
+				qcom,mdss-dsi-v-top-border = <0>;
+				qcom,mdss-dsi-v-bottom-border = <0>;
+				qcom,mdss-dsi-panel-jitter = <0x4 0x1>;
+				qcom,mdss-dsi-on-command = [
+					39 01 00 00 00 00 03 b0 a5 00
+					39 01 00 00 00 00 03 5c 42 00
+					07 01 00 00 00 00 02 01 00
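+					/* 0ah = DSI Picture Parameter Set:
+					 * the 128-byte DSC PPS; the 07h packet
+					 * above is assumed to be the DSI
+					 * compression-mode command.
+					 */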
+					0a 01 00 00 00 00 80 11 00 00 89 30 80
+					   0B 40 05 A0 05 A0 02 D0 02 D0 02 00
+					   02 68 00 20 9A DB 00 0A 00 0C 00 12
+					   00 0E 18 00 10 F0 03 0C 20 00 06 0B
+					   0B 33 0E 1C 2A 38 46 54 62 69 70 77
+					   79 7B 7D 7E 01 02 01 00 09 40 09 BE
+					   19 FC 19 FA 19 F8 1A 38 1A 78 1A B6
+					   2A F6 2B 34 2B 74 3B 74 6B F4 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00
+					39 01 00 00 00 00 03 b0 a5 00
+					39 01 00 00 00 00 09 F8 00 08 10 08 2D
+					   00 00 2D
+					15 01 00 00 00 00 02 55 0c
+					05 01 00 00 1e 00 02 11 00
+					39 01 00 00 00 00 03 b0 a5 00
+					05 01 00 00 78 00 02 35 00
+					05 01 00 00 3c 00 02 29 00
+				];
+
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 14 00 02 28 00
+					05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,mdss-dsi-h-sync-pulse = <0>;
+				qcom,mdss-dsi-qsync-on-commands =
+					[15 01 00 00 00 00 02 5a 01];
+				qcom,mdss-dsi-qsync-on-commands-state =
+					"dsi_lp_mode";
+				qcom,mdss-dsi-qsync-off-commands =
+					[15 01 00 00 00 00 02 5a 00];
+				qcom,mdss-dsi-qsync-off-commands-state =
+					"dsi_lp_mode";
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <1440>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <1>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi
new file mode 100644
index 0000000..4618620
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-sw43404-amoled-dsc-wqhd-video.dtsi
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_sw43404_amoled_video: qcom,mdss_dsi_sw43404_amoled_wqhd_video {
+		qcom,mdss-dsi-panel-name =
+			"sw43404 amoled video mode dsi boe panel with DSC";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,adjust-timer-wakeup-ms = <1>;
+		qcom,mdss-dsi-panel-hdr-enabled;
+		qcom,mdss-dsi-panel-hdr-color-primaries = <14500 15500 32000
+			17000 15500 30000 8000 3000>;
+		qcom,mdss-dsi-panel-peak-brightness = <4200000>;
+		qcom,mdss-dsi-panel-blackness-level = <3230>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1440>;
+				qcom,mdss-dsi-panel-height = <2880>;
+				qcom,mdss-dsi-h-front-porch = <10>;
+				qcom,mdss-dsi-h-back-porch = <10>;
+				qcom,mdss-dsi-h-pulse-width = <12>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <10>;
+				qcom,mdss-dsi-v-front-porch = <10>;
+				qcom,mdss-dsi-v-pulse-width = <1>;
+				qcom,mdss-dsi-h-left-border = <0>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+				  07 01 00 00 00 00 02 01 00
+				  39 01 00 00 00 00 03 b0 a5 00
+				  39 01 00 00 00 00 06 b2 00 5d 04 80 49
+				  15 01 00 00 00 00 02 3d 10
+				  15 01 00 00 00 00 02 36 00
+				  15 01 00 00 00 00 02 55 0c
+				  39 01 00 00 00 00 09 f8 00 08 10 08 2d
+				     00 00 2d
+				  39 01 00 00 3c 00 03 51 00 00
+				  05 01 00 00 50 00 02 11 00
+				  39 01 00 00 00 00 03 b0 34 04
+				  39 01 00 00 00 00 05 c1 00 00 00 46
+				  0a 01 00 00 00 00 80 11 00 00 89 30 80
+				     0B 40 05 A0 02 d0 02 D0 02 D0 02 00
+				     02 68 00 20 4e a8 00 0A 00 0C 00 23
+				     00 1c 18 00 10 F0 03 0C 20 00 06 0B
+				     0B 33 0E 1C 2A 38 46 54 62 69 70 77
+				     79 7B 7D 7E 01 02 01 00 09 40 09 BE
+				     19 FC 19 FA 19 F8 1A 38 1A 78 1A B6
+				     2A F6 2B 34 2B 74 3B 74 6B F4 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00
+				     00 00 00 00 00 00 00 00 00 00 00 00
+				     00 00
+				  05 01 00 00 78 00 02 29 00
+				];
+				qcom,mdss-dsi-off-command =
+					[05 01 00 00 78 00 02 28 00
+					 05 01 00 00 78 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+				qcom,compression-mode = "dsc";
+				qcom,mdss-dsc-slice-height = <720>;
+				qcom,mdss-dsc-slice-width = <720>;
+				qcom,mdss-dsc-slice-per-pkt = <2>;
+				qcom,mdss-dsc-bit-per-component = <8>;
+				qcom,mdss-dsc-bit-per-pixel = <8>;
+				qcom,mdss-dsc-block-prediction-enable;
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-td4328-1080p-cmd.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-td4328-1080p-cmd.dtsi
new file mode 100644
index 0000000..9d607eb
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-td4328-1080p-cmd.dtsi
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_td4328_truly_cmd: qcom,mdss_dsi_td4328_truly_cmd {
+		qcom,mdss-dsi-panel-name =
+			"td4328 cmd mode dsi truly panel";
+		qcom,mdss-dsi-panel-type = "dsi_cmd_mode";
+
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+		qcom,mdss-dsi-te-pin-select = <1>;
+		qcom,mdss-dsi-wr-mem-start = <0x2c>;
+		qcom,mdss-dsi-wr-mem-continue = <0x3c>;
+		qcom,mdss-dsi-te-dcs-command = <1>;
+		qcom,mdss-dsi-te-check-enable;
+		qcom,mdss-dsi-te-using-te-pin;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <2160>;
+				qcom,mdss-dsi-h-front-porch = <70>;
+				qcom,mdss-dsi-h-back-porch = <40>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <10>;
+				qcom,mdss-dsi-v-front-porch = <5>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					29 01 00 00 00 00 02 B0 00
+					29 01 00 00 00 00 04 B3 00 00 06
+					29 01 00 00 00 00 02 B4 00
+					29 01 00 00 00 00 06 B6 33 DB 80 12 00
+					29 01 00 00 00 00 08 B8 57 3D 19 1E 0A
+					   50 50
+					29 01 00 00 00 00 08 B9 6F 3D 28 3C 14
+					   C8 C8
+					29 01 00 00 00 00 08 BA B5 33 41 64 23
+					   A0 A0
+					29 01 00 00 00 00 03 BB 14 14
+					29 01 00 00 00 00 03 BC 37 32
+					29 01 00 00 00 00 03 BD 64 32
+					29 01 00 00 00 00 02 BE 04
+					29 01 00 00 00 00 02 C0 00
+					29 01 00 00 00 00 2E C1 04 48 00 00 26
+					   15 19 0B 63 D2 D9 9A 73 EF BD E7 5C
+					   6B 93 4D 22 18 8B 2A 41 00 00 00 00
+					   00 00 00 00 00 40 02 22 1B 06 03 00
+					   07 FF 00 01
+					29 01 00 00 00 00 18 C2 01 F8 70 08 68
+					   08 0C 10 00 08 30 00 00 00 00 00 00
+					   20 02 43 00 00 00
+					29 01 00 00 00 00 3F C3 87 D8 7D 87 D0
+					   00 00 00 00 00 00 04 3A 00 00 00 04
+					   44 00 00 01 01 03 28 00 01 00 01 00
+					   00 19 00 0C 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 32 00 19 00 5A
+					   02 32 00 19 00 5A 02 40 00
+					29 01 00 00 00 00 15 C4 70 00 00 00 11
+					   11 00 00 00 02 02 31 01 00 00 00 02
+					   01 01 01
+					29 01 00 00 00 00 08 C5 08 00 00 00 00
+					   70 00
+					29 01 00 00 00 00 40 C6 5B 2D 2D 07 54
+					   07 54 01 02 01 02 07 07 00 00 07 07
+					   0F 11 07 5B 00 5B 5B C2 C2 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00
+					29 01 00 00 00 00 27 C7 01 1D 2E 41 4F
+					   5A 71 80 8B 95 45 4F 5C 71 7B 88 98
+					   A6 BE 01 1D 2E 41 4F 5A 71 80 8B 95
+					   45 4F 5C 71 7B 88 98 A6 BE
+					29 01 00 00 00 00 38 C8 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00
+					29 01 00 00 00 00 14 C9 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00
+					29 01 00 00 00 00 2C CA 1C FC FC FC 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00
+					29 01 00 00 00 00 1C CB FF FF FF FF 0F
+					   00 08 00 01 00 31 F0 40 08 00 00 00
+					   00 00 00 00 00 00 00 00 00 00
+					29 01 00 00 00 00 02 CC 02
+					29 01 00 00 00 00 27 CD 10 80 37 C0 1A
+					   00 5C 02 19 90 11 88 D8 6C D8 6C 01
+					   00 00 00 32 00 32 00 5D 02 32 32 01
+					   33 00 33 00 5E 02 32 32 AF
+					29 01 00 00 00 00 1A CE 5D 40 49 53 59
+					   5E 63 68 6E 74 7E 8A 98 A8 BB D0 FF
+					   04 00 04 04 42 00 69 5A
+					29 01 00 00 00 00 03 CF 4A 1D
+					29 01 00 00 00 00 12 D0 33 57 D4 31 01
+					   10 10 10 19 19 00 00 00 00 00 00 00
+					29 01 00 00 00 00 02 D1 00
+					29 01 00 00 00 00 20 D2 10 00 00 10 75
+					   0F 03 25 20 00 00 00 00 00 00 00 00
+					   04 00 00 00 00 00 00 00 00 00 00 00
+					   00 00
+					29 01 00 00 00 00 17 D3 1B 3B BB 77 77
+					   77 BB B3 33 00 00 6D 6E C7 C7 33 BB
+					   F2 FD C6 0B 07
+					29 01 00 00 00 00 08 D4 00 00 00 00 00
+					   00 00
+					29 01 00 00 00 00 08 D5 03 00 00 02 2B
+					   02 2B
+					29 01 00 00 00 00 02 D6 01
+					29 01 00 00 00 00 22 D7 F6 FF 03 05 41
+					   24 80 1F C7 1F 1B 00 0C 07 20 00 00
+					   00 00 00 0C 00 1F 00 FC 00 00 AA 67
+					   7E 5D 06 00
+					29 01 00 00 00 00 03 D9 20 14
+					29 01 00 00 00 00 05 DD 30 06 23 65
+					29 01 00 00 00 00 05 DE 00 3F FF 50
+					29 01 00 00 00 00 06 E7 00 00 00 46 61
+					29 01 00 00 00 00 02 EA 1F
+					29 01 00 00 00 00 04 EE 41 51 00
+					29 01 00 00 00 00 03 F1 00 00
+					39 01 00 00 00 00 05 2A 00 00 04 37
+					39 01 00 00 00 00 05 2B 00 00 08 6F
+					39 01 00 00 00 00 01 2C
+					29 01 00 00 00 00 02 B0 00
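+					/* standard DCS tail (assumed): 51h
+					 * brightness, 53h ctrl-display, 55h
+					 * CABC, 35h TE on, then 11h sleep-out
+					 * (150 ms wait) and 29h display-on
+					 * (50 ms wait)
+					 */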
+					39 01 00 00 00 00 02 51 FF
+					39 01 00 00 00 00 02 53 0C
+					39 01 00 00 00 00 02 55 00
+					15 01 00 00 00 00 02 35 00
+					05 01 00 00 96 00 01 11
+					05 01 00 00 32 00 01 29];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 32 00 02 28 00
+					05 01 00 00 96 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/dsi-panel-td4328-1080p-video.dtsi b/arch/arm64/boot/dts/qcom/dsi-panel-td4328-1080p-video.dtsi
new file mode 100644
index 0000000..7bcc705
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/dsi-panel-td4328-1080p-video.dtsi
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+&mdss_mdp {
+	dsi_td4328_truly_video: qcom,mdss_dsi_td4328_truly_video {
+		qcom,mdss-dsi-panel-name =
+			"td4328 video mode dsi truly panel";
+		qcom,mdss-dsi-panel-type = "dsi_video_mode";
+
+		qcom,mdss-dsi-virtual-channel-id = <0>;
+		qcom,mdss-dsi-stream = <0>;
+		qcom,mdss-dsi-h-left-border = <0>;
+		qcom,mdss-dsi-h-right-border = <0>;
+		qcom,mdss-dsi-v-top-border = <0>;
+		qcom,mdss-dsi-v-bottom-border = <0>;
+		qcom,mdss-dsi-bpp = <24>;
+		qcom,mdss-dsi-color-order = "rgb_swap_rgb";
+		qcom,mdss-dsi-underflow-color = <0xff>;
+		qcom,mdss-dsi-border-color = <0>;
+		qcom,mdss-dsi-h-sync-pulse = <0>;
+		qcom,mdss-dsi-traffic-mode = "non_burst_sync_event";
+		qcom,mdss-dsi-lane-map = "lane_map_0123";
+		qcom,mdss-dsi-bllp-eof-power-mode;
+		qcom,mdss-dsi-bllp-power-mode;
+		qcom,mdss-dsi-tx-eot-append;
+		qcom,mdss-dsi-lane-0-state;
+		qcom,mdss-dsi-lane-1-state;
+		qcom,mdss-dsi-lane-2-state;
+		qcom,mdss-dsi-lane-3-state;
+		qcom,mdss-dsi-dma-trigger = "trigger_sw";
+		qcom,mdss-dsi-mdp-trigger = "none";
+		qcom,mdss-dsi-lp11-init;
+		qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
+		qcom,mdss-dsi-bl-min-level = <1>;
+		qcom,mdss-dsi-bl-max-level = <4095>;
+		qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
+
+		qcom,mdss-dsi-display-timings {
+			timing@0 {
+				qcom,mdss-dsi-panel-width = <1080>;
+				qcom,mdss-dsi-panel-height = <2160>;
+				qcom,mdss-dsi-h-front-porch = <70>;
+				qcom,mdss-dsi-h-back-porch = <40>;
+				qcom,mdss-dsi-h-pulse-width = <16>;
+				qcom,mdss-dsi-h-sync-skew = <0>;
+				qcom,mdss-dsi-v-back-porch = <10>;
+				qcom,mdss-dsi-v-front-porch = <5>;
+				qcom,mdss-dsi-v-pulse-width = <2>;
+				qcom,mdss-dsi-panel-framerate = <60>;
+				qcom,mdss-dsi-on-command = [
+					29 01 00 00 00 00 02 B0 00
+					29 01 00 00 00 00 04 B3 31 00 06
+					29 01 00 00 00 00 02 B4 00
+					29 01 00 00 00 00 06 B6 33 DB 80 12 00
+					29 01 00 00 00 00 08 B8 57 3D 19 1E 0A
+					   50 50
+					29 01 00 00 00 00 08 B9 6F 3D 28 3C 14
+					   C8 C8
+					29 01 00 00 00 00 08 BA B5 33 41 64 23
+					   A0 A0
+					29 01 00 00 00 00 03 BB 14 14
+					29 01 00 00 00 00 03 BC 37 32
+					29 01 00 00 00 00 03 BD 64 32
+					29 01 00 00 00 00 02 BE 04
+					29 01 00 00 00 00 02 C0 00
+					29 01 00 00 00 00 2E C1 04 48 00 00 26
+					   15 19 0B 63 D2 D9 9A 73 EF BD E7 5C
+					   6B 93 4D 22 18 8B 2A 41 00 00 00 00
+					   00 00 00 00 00 40 02 22 1B 06 03 00
+					   07 FF 00 01
+					29 01 00 00 00 00 18 C2 01 F8 70 08 68
+					   08 0C 10 00 08 30 00 00 00 00 00 00
+					   20 02 43 00 00 00
+					29 01 00 00 00 00 3F C3 87 D8 7D 87 D0
+					   00 00 00 00 00 00 04 3A 00 00 00 04
+					   44 00 00 01 01 03 28 00 01 00 01 00
+					   00 19 00 0C 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 32 00 19 00 5A
+					   02 32 00 19 00 5A 02 40 00
+					29 01 00 00 00 00 15 C4 70 00 00 00 11
+					   11 00 00 00 02 02 31 01 00 00 00 02
+					   01 01 01
+					29 01 00 00 00 00 08 C5 08 00 00 00 00
+					   70 00
+					29 01 00 00 00 00 40 C6 5B 2D 2D 07 54
+					   07 54 01 02 01 02 07 07 00 00 07 07
+					   0F 11 07 5B 00 5B 5B C2 C2 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00
+					29 01 00 00 00 00 27 C7 01 1D 2E 41 4F
+					   5A 71 80 8B 95 45 4F 5C 71 7B 88 98
+					   A6 BE 01 1D 2E 41 4F 5A 71 80 8B 95
+					   45 4F 5C 71 7B 88 98 A6 BE
+					29 01 00 00 00 00 38 C8 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00
+					29 01 00 00 00 00 14 C9 00 00 00 00 00
+					   FC 00 00 00 00 00 FC 00 00 00 00 00
+					   FC 00
+					29 01 00 00 00 00 2C CA 1C FC FC FC 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00 00 00 00 00 00 00 00 00 00 00
+					   00 00
+					29 01 00 00 00 00 1C CB FF FF FF FF 0F
+					   00 08 00 01 00 31 F0 40 08 00 00 00
+					   00 00 00 00 00 00 00 00 00 00
+					29 01 00 00 00 00 02 CC 02
+					29 01 00 00 00 00 27 CD 10 80 37 C0 1A
+					   00 5C 02 19 90 11 88 D8 6C D8 6C 01
+					   00 00 00 32 00 32 00 5D 02 32 32 01
+					   33 00 33 00 5E 02 32 32 AF
+					29 01 00 00 00 00 1A CE 5D 40 49 53 59
+					   5E 63 68 6E 74 7E 8A 98 A8 BB D0 FF
+					   04 00 04 04 42 00 69 5A
+					29 01 00 00 00 00 03 CF 4A 1D
+					29 01 00 00 00 00 12 D0 33 57 D4 31 01
+					   10 10 10 19 19 00 00 00 00 00 00 00
+					29 01 00 00 00 00 02 D1 00
+					29 01 00 00 00 00 20 D2 10 00 00 10 75
+					   0F 03 25 20 00 00 00 00 00 00 00 00
+					   04 00 00 00 00 00 00 00 00 00 00 00
+					   00 00
+					29 01 00 00 00 00 17 D3 1B 3B BB 77 77
+					   77 BB B3 33 00 00 6D 6E DB DB 33 BB
+					   F2 FD C6 0B 07
+					29 01 00 00 00 00 08 D4 00 00 00 00 00
+					   00 00
+					29 01 00 00 00 00 08 D5 03 00 00 02 40
+					   02 40
+					29 01 00 00 00 00 02 D6 01
+					29 01 00 00 00 00 22 D7 F6 FF 03 05 41
+					   24 80 1F C7 1F 1B 00 0C 07 20 00 00
+					   00 00 00 0C 00 1F 00 FC 00 00 AA 67
+					   7E 5D 06 00
+					29 01 00 00 00 00 03 D9 20 14
+					29 01 00 00 00 00 05 DD 30 06 23 65
+					29 01 00 00 00 00 05 DE 00 3F FF 90
+					29 01 00 00 00 00 06 E7 00 00 00 46 61
+					29 01 00 00 00 00 02 EA 1F
+					29 01 00 00 00 00 04 EE 41 51 00
+					29 01 00 00 00 00 03 F1 00 00
+					39 01 00 00 00 00 05 2A 00 00 04 37
+					39 01 00 00 00 00 05 2B 00 00 08 6F
+					39 01 00 00 00 00 01 2C
+					39 01 00 00 00 00 02 51 FF
+					39 01 00 00 00 00 02 53 0C
+					39 01 00 00 00 00 02 55 00
+					05 01 00 00 96 00 01 11
+					05 01 00 00 32 00 01 29];
+				qcom,mdss-dsi-off-command = [
+					05 01 00 00 32 00 02 28 00
+					05 01 00 00 96 00 02 10 00];
+				qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+				qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-ion.dtsi b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
index b21d5e8..f3be668 100644
--- a/arch/arm64/boot/dts/qcom/kona-ion.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-ion.dtsi
@@ -10,12 +10,12 @@
 		#size-cells = <0>;
 
 		system_heap: qcom,ion-heap@25 {
-			reg = <0x25>;
+			reg = <25>;
 			qcom,ion-heap-type = "SYSTEM";
 		};
 
 		system_secure_heap: qcom,ion-heap@9 {
-			reg = <0x9>;
+			reg = <9>;
 			qcom,ion-heap-type = "SYSTEM_SECURE";
 		};
 	};
diff --git a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
index da688b6..0889f48 100644
--- a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
@@ -142,7 +142,7 @@
 
 				config {
 					pins = "gpio145";
-					drive-strength = <8>;
+					drive-strength = <16>;
 					bias-disable;
 				};
 			};
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
new file mode 100644
index 0000000..814f916
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-sde-display.dtsi
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&soc {
+	sde_wb: qcom,wb-display@0 {
+		compatible = "qcom,wb-display";
+		cell-index = <0>;
+		label = "wb_display";
+	};
+};
+
+&mdss_mdp {
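+	/*
+	 * Only the writeback connector is wired up here; panel connectors
+	 * are presumably added by board-specific display overlays.
+	 */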
+	connectors = <&sde_wb>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
new file mode 100644
index 0000000..b351407
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+&soc {
+	mdss_dsi0_pll: qcom,mdss_dsi_pll@ae94900 {
+		compatible = "qcom,mdss_dsi_pll_7nm";
+		label = "MDSS DSI 0 PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+		reg = <0xae94900 0x260>,
+		      <0xae94400 0x800>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		gdsc-supply = <&mdss_core_gdsc>;
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
+	mdss_dsi1_pll: qcom,mdss_dsi_pll@ae96900 {
+		compatible = "qcom,mdss_dsi_pll_7nm";
+		label = "MDSS DSI 1 PLL";
+		cell-index = <1>;
+		#clock-cells = <1>;
+		reg = <0xae96900 0x260>,
+		      <0xae96400 0x800>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "gdsc_base";
+		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
+		clock-names = "iface_clk";
+		clock-rate = <0>;
+		gdsc-supply = <&mdss_core_gdsc>;
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "gdsc";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+	};
+
+	mdss_dp_pll: qcom,mdss_dp_pll@c011000 {
+		compatible = "qcom,mdss_dp_pll_7nm";
+		label = "MDSS DP PLL";
+		cell-index = <0>;
+		#clock-cells = <1>;
+
+		reg = <0x088ea000 0x200>,
+		      <0x088eaa00 0x200>,
+		      <0x088ea200 0x200>,
+		      <0x088ea600 0x200>,
+		      <0xaf03000 0x8>;
+		reg-names = "pll_base", "phy_base", "ln_tx0_base",
+			"ln_tx1_base", "gdsc_base";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-sde.dtsi b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
new file mode 100644
index 0000000..7c17409
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+&soc {
+	mdss_mdp: qcom,mdss_mdp@ae00000 {
+		compatible = "qcom,sde-kms";
+		reg = <0x0ae00000 0x84208>,
+		      <0x0aeb0000 0x2008>,
+		      <0x0aeac000 0x214>;
+		reg-names = "mdp_phys",
+			"vbif_phys",
+			"regdma_phys";
+
+		clocks =
+			<&clock_gcc GCC_DISP_AHB_CLK>,
+			<&clock_gcc GCC_DISP_HF_AXI_CLK>,
+			<&clock_gcc GCC_DISP_SF_AXI_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_MDP_LUT_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_ROT_CLK>;
+		clock-names = "gcc_iface", "gcc_bus", "gcc_nrt_bus",
+				"iface_clk", "core_clk", "vsync_clk",
+				"lut_clk", "rot_clk";
+		clock-rate = <0 0 0 0 300000000 19200000 300000000>;
+		clock-max-rate = <0 0 0 0 460000000 19200000 460000000>;
+
+		/* interrupt config */
+		interrupts = <0 83 0>;
+		interrupt-controller;
+		iommus = <&apps_smmu 0x820 0x402>;
+
+		/* hw blocks */
+		qcom,sde-off = <0x1000>;
+		qcom,sde-len = <0x494>;
+
+		qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+				     0x2600 0x2800 0x2a00>;
+		qcom,sde-ctl-size = <0x1dc>;
+		qcom,sde-ctl-display-pref = "primary", "primary", "none",
+			    "none", "none", "none";
+
+		qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+				      0x48000 0x49000 0x4a000>;
+		qcom,sde-mixer-size = <0x320>;
+		qcom,sde-mixer-display-pref = "primary", "primary", "none",
+					      "none", "none", "none";
+
+		qcom,sde-dspp-top-off = <0x1300>;
+		qcom,sde-dspp-top-size = <0x80>;
+		qcom,sde-dspp-off = <0x55000 0x57000 0x59000 0x5b000>;
+		qcom,sde-dspp-size = <0x1800>;
+
+		qcom,sde-dest-scaler-top-off = <0x00061000>;
+		qcom,sde-dest-scaler-top-size = <0x1c>;
+		qcom,sde-dest-scaler-off = <0x800 0x1000>;
+		qcom,sde-dest-scaler-size = <0x800>;
+
+		qcom,sde-wb-off = <0x66000>;
+		qcom,sde-wb-size = <0x2c8>;
+		qcom,sde-wb-xin-id = <6>;
+		qcom,sde-wb-id = <2>;
+		qcom,sde-wb-clk-ctrl = <0x3b8 24>;
+
+		qcom,sde-intf-off = <0x6b000 0x6b800
+					0x6c000 0x6c800>;
+		qcom,sde-intf-size = <0x2b8>;
+		qcom,sde-intf-type = "dp", "dsi", "dsi", "dp";
+
+		qcom,sde-pp-off = <0x71000 0x71800
+					  0x72000 0x72800 0x73000 0x73800>;
+		qcom,sde-pp-slave = <0x0 0x0 0x0 0x0 0x0 0x0>;
+		qcom,sde-pp-size = <0xd4>;
+		qcom,sde-pp-merge-3d-id = <0x0 0x0 0x1 0x1 0x2 0x2>;
+
+		qcom,sde-merge-3d-off = <0x84000 0x84100 0x84200>;
+		qcom,sde-merge-3d-size = <0x100>;
+
+		qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0 0x0 0x0>;
+
+		qcom,sde-cdm-off = <0x7a200>;
+		qcom,sde-cdm-size = <0x400>;
+
+		qcom,sde-dsc-off = <0x81000 0x81400 0x81800 0x81c00>;
+		qcom,sde-dsc-size = <0x140>;
+
+		qcom,sde-dither-off = <0x30e0 0x30e0 0x30e0
+					0x30e0 0x30e0 0x30e0>;
+		qcom,sde-dither-version = <0x00010000>;
+		qcom,sde-dither-size = <0x20>;
+
+		qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
+					"dma", "dma", "dma", "dma";
+
+		qcom,sde-sspp-off = <0x5000 0x7000 0x9000 0xb000
+					0x25000 0x27000 0x29000 0x2b000>;
+		qcom,sde-sspp-src-size = <0x1f8>;
+
+		qcom,sde-sspp-xin-id = <0 4 8 12
+					1 5 9 13>;
+		qcom,sde-sspp-excl-rect = <1 1 1 1
+						1 1 1 1>;
+		qcom,sde-sspp-smart-dma-priority = <5 6 7 8 1 2 3 4>;
+		qcom,sde-smart-dma-rev = "smart_dma_v2p5";
+
+		qcom,sde-mixer-pair-mask = <2 1 4 3 6 5>;
+
+		qcom,sde-mixer-blend-op-off = <0x20 0x38 0x50 0x68 0x80 0x98
+						0xb0 0xc8 0xe0 0xf8 0x110>;
+
+		qcom,sde-max-per-pipe-bw-kbps = <4500000 4500000
+						 4500000 4500000
+						 4500000 4500000
+						 4500000 4500000>;
+
+		/* offsets are relative to "mdp_phys" + qcom,sde-off */
+		qcom,sde-sspp-clk-ctrl =
+				<0x2ac 0>, <0x2b4 0>, <0x2bc 0>, <0x2c4 0>,
+				 <0x2ac 8>, <0x2b4 8>, <0x2bc 8>, <0x2c4 8>;
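+		/* each pair above is assumed to be <clk-ctrl register offset,
+		 * clock-enable bit> for the corresponding SSPP
+		 */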
+		qcom,sde-sspp-csc-off = <0x1a00>;
+		qcom,sde-csc-type = "csc-10bit";
+		qcom,sde-qseed-type = "qseedv3";
+		qcom,sde-sspp-qseed-off = <0xa00>;
+		qcom,sde-mixer-linewidth = <2560>;
+		qcom,sde-sspp-linewidth = <4096>;
+		qcom,sde-wb-linewidth = <4096>;
+		qcom,sde-mixer-blendstages = <0xb>;
+		qcom,sde-highest-bank-bit = <0x2>;
+		qcom,sde-ubwc-version = <0x300>;
+		qcom,sde-ubwc-bw-calc-version = <0x1>;
+		qcom,sde-panic-per-pipe;
+		qcom,sde-has-cdp;
+		qcom,sde-has-src-split;
+		qcom,sde-pipe-order-version = <0x1>;
+		qcom,sde-has-dim-layer;
+		qcom,sde-has-dest-scaler;
+		qcom,sde-max-dest-scaler-input-linewidth = <2048>;
+		qcom,sde-max-dest-scaler-output-linewidth = <2560>;
+		qcom,sde-max-bw-low-kbps = <11900000>;
+		qcom,sde-max-bw-high-kbps = <11900000>;
+		qcom,sde-min-core-ib-kbps = <2400000>;
+		qcom,sde-min-llcc-ib-kbps = <800000>;
+		qcom,sde-min-dram-ib-kbps = <800000>;
+		qcom,sde-dram-channels = <2>;
+		qcom,sde-num-nrt-paths = <0>;
+
+		qcom,sde-vbif-off = <0>;
+		qcom,sde-vbif-size = <0x1040>;
+		qcom,sde-vbif-id = <0>;
+
+		qcom,sde-cdp-setting = <1 1>, <1 0>;
+
+		qcom,sde-qos-cpu-mask = <0x3>;
+		qcom,sde-qos-cpu-dma-latency = <300>;
+
+		/* offsets are relative to "mdp_phys" + qcom,sde-off */
+
+		qcom,sde-reg-dma-off = <0>;
+		qcom,sde-reg-dma-version = <0x00010001>;
+		qcom,sde-reg-dma-trigger-off = <0x119c>;
+
+		qcom,sde-secure-sid-mask = <0x4200801>;
+
+		qcom,sde-sspp-vig-blocks {
+			qcom,sde-vig-csc-off = <0x1a00>;
+			qcom,sde-vig-qseed-off = <0xa00>;
+			qcom,sde-vig-qseed-size = <0xa0>;
+			qcom,sde-vig-gamut = <0x1d00 0x00050000>;
+			qcom,sde-vig-igc = <0x1d00 0x00050000>;
+			qcom,sde-vig-inverse-pma;
+		};
+
+		qcom,sde-sspp-dma-blocks {
+			dgm@0 {
+				qcom,sde-dma-igc = <0x400 0x00050000>;
+				qcom,sde-dma-gc = <0x600 0x00050000>;
+				qcom,sde-dma-inverse-pma;
+				qcom,sde-dma-csc-off = <0x200>;
+			};
+
+			dgm@1 {
+				qcom,sde-dma-igc = <0x1400 0x00050000>;
+				qcom,sde-dma-gc = <0x600 0x00050000>;
+				qcom,sde-dma-inverse-pma;
+				qcom,sde-dma-csc-off = <0x1200>;
+			};
+		};
+
+		qcom,sde-dspp-blocks {
+			qcom,sde-dspp-igc = <0x0 0x00030001>;
+			qcom,sde-dspp-hsic = <0x800 0x00010007>;
+			qcom,sde-dspp-memcolor = <0x880 0x00010007>;
+			qcom,sde-dspp-hist = <0x800 0x00010007>;
+			qcom,sde-dspp-sixzone = <0x900 0x00010007>;
+			qcom,sde-dspp-vlut = <0xa00 0x00010008>;
+			qcom,sde-dspp-gamut = <0x1000 0x00040001>;
+			qcom,sde-dspp-pcc = <0x1700 0x00040000>;
+			qcom,sde-dspp-gc = <0x17c0 0x00010008>;
+			qcom,sde-dspp-dither = <0x82c 0x00010007>;
+		};
+
+		qcom,platform-supply-entries {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			qcom,platform-supply-entry@0 {
+				reg = <0>;
+				qcom,supply-name = "mmcx";
+				qcom,supply-min-voltage = <0>;
+				qcom,supply-max-voltage = <0>;
+				qcom,supply-enable-load = <0>;
+				qcom,supply-disable-load = <0>;
+			};
+		};
+
+		smmu_sde_sec: qcom,smmu_sde_sec_cb {
+			compatible = "qcom,smmu_sde_sec";
+			iommus = <&apps_smmu 0x821 0x400>;
+		};
+
+		/* data and reg bus scale settings */
+		qcom,sde-data-bus {
+			qcom,msm-bus,name = "mdss_sde";
+			qcom,msm-bus,num-cases = <3>;
+			qcom,msm-bus,num-paths = <2>;
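+			/* each vector is <master-id slave-id ab-KBps ib-KBps>,
+			 * per the msm-bus binding
+			 */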
+			qcom,msm-bus,vectors-KBps =
+				<22 512 0 0>, <23 512 0 0>,
+				<22 512 0 6400000>, <23 512 0 6400000>,
+				<22 512 0 6400000>, <23 512 0 6400000>;
+		};
+
+		qcom,sde-reg-bus {
+			qcom,msm-bus,name = "mdss_reg";
+			qcom,msm-bus,num-cases = <4>;
+			qcom,msm-bus,num-paths = <1>;
+			qcom,msm-bus,vectors-KBps =
+				<1 590 0 0>,
+				<1 590 0 76800>,
+				<1 590 0 150000>,
+				<1 590 0 300000>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 6074cc0..9c5df6b 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -18,6 +18,7 @@
 #include <dt-bindings/msm/msm-bus-ids.h>
 #include <dt-bindings/soc/qcom,ipcc.h>
 #include <dt-bindings/soc/qcom,rpmh-rsc.h>
+#include <dt-bindings/gpio/gpio.h>
 
 #include "kona-regulators.dtsi"
 
@@ -43,6 +44,7 @@
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_0>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
 			L2_0: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -65,6 +67,7 @@
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_1>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
 			L2_1: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -81,6 +84,7 @@
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_2>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
 			L2_2: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -97,6 +101,7 @@
 			cache-size = <0x8000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_3>;
+			qcom,freq-domain = <&cpufreq_hw 0 4>;
 			L2_3: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -113,6 +118,7 @@
 			cache-size = <0x10000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_4>;
+			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			L2_4: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -129,6 +135,7 @@
 			cache-size = <0x10000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_5>;
+			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			L2_5: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -145,6 +152,7 @@
 			cache-size = <0x10000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_6>;
+			qcom,freq-domain = <&cpufreq_hw 1 4>;
 			L2_6: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x20000>;
@@ -161,6 +169,7 @@
 			cache-size = <0x10000>;
 			cpu-release-addr = <0x0 0x90000000>;
 			next-level-cache = <&L2_7>;
+			qcom,freq-domain = <&cpufreq_hw 2 4>;
 			L2_7: l2-cache {
 			      compatible = "arm,arch-cache";
 			      cache-size = <0x80000>;
@@ -208,13 +217,28 @@
 		};
 	};
 
+
 	cpu_pmu: cpu-pmu {
 		compatible = "arm,armv8-pmuv3";
 		qcom,irq-is-percpu;
 		interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>;
 	};
 
-	soc: soc { };
+	soc: soc {
+		cpufreq_hw: qcom,cpufreq-hw {
+			compatible = "qcom,cpufreq-hw";
+			reg = <0x18591000 0x1000>, <0x18592000 0x1000>,
+				<0x18593000 0x1000>;
+			reg-names = "freq-domain0", "freq-domain1",
+				"freq-domain2";
+
+			clocks = <&clock_xo>, <&clock_gcc GPLL0>;
+			clock-names = "xo", "cpu_clk";
+
+			#freq-domain-cells = <2>;
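+			/*
+			 * CPUs 0-3 vote on freq-domain0, CPUs 4-6 on
+			 * freq-domain1 and CPU7 on freq-domain2 via the
+			 * qcom,freq-domain properties above; the second
+			 * specifier cell is driver-defined.
+			 */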
+		};
+	};
+
 	firmware: firmware {
 		android {
 			compatible = "android,firmware";
@@ -232,7 +256,6 @@
 		};
 	};
 
-
 	reserved-memory {
 		#address-cells = <2>;
 		#size-cells = <2>;
@@ -248,6 +271,12 @@
 			reg = <0x0 0x80700000 0x0 0x140000>;
 		};
 
+		cmd_db: reserved-memory@80820000 {
+			reg = <0x0 0x80820000 0x0 0x20000>;
+			compatible = "qcom,cmd-db";
+			no-map;
+		};
+
 		smem_mem: smem_region@80900000 {
 			no-map;
 			reg = <0x0 0x80900000 0x0 0x200000>;
@@ -263,12 +292,6 @@
 			reg = <0x0 0x81e00000 0x0 0x2600000>;
 		};
 
-		cmd_db: reserved-memory@85fe0000 {
-			reg = <0x0 0x85fe0000 0x0 0x20000>;
-			compatible = "qcom,cmd-db";
-			no-map;
-		};
-
 		pil_camera_mem: pil_camera_region@86000000 {
 			compatible = "removed-dma-pool";
 			no-map;
@@ -463,6 +486,20 @@
 		};
 	};
 
+	qcom,devfreq-l3 {
+		compatible = "qcom,devfreq-fw";
+		reg = <0x18590000 0x4>, <0x18590100 0xa0>, <0x18590320 0x4>;
+		reg-names = "en-base", "ftbl-base", "perf-base";
+
+		qcom,cpu0-l3 {
+			compatible = "qcom,devfreq-fw-voter";
+		};
+
+		qcom,cpu4-l3 {
+			compatible = "qcom,devfreq-fw-voter";
+		};
+	};
+
 	qcom,msm-imem@146bf000 {
 		compatible = "qcom,msm-imem";
 		reg = <0x146bf000 0x1000>;
@@ -528,7 +565,7 @@
 		qcom,ap2mdm-errfatal-gpio = <&tlmm 57 0x00>;
 		qcom,mdm2ap-status-gpio   = <&tlmm 3 0x00>;
 		qcom,ap2mdm-status-gpio   = <&tlmm 56 0x00>;
-		qcom,ap2mdm-soft-reset-gpio = <&tlmm 145 0x00>;
+		qcom,ap2mdm-soft-reset-gpio = <&tlmm 145 GPIO_ACTIVE_LOW>;
 		qcom,mdm-link-info = "0306_02.01.00";
 		status = "ok";
 	};
@@ -1146,6 +1183,95 @@
 			};
 		};
 	};
+
+	qcom,lpass@17300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x17300000 0x00100>;
+
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
+		qcom,proxy-reg-names = "vdd_cx";
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <423>;
+		qcom,sysmon-id = <1>;
+		qcom,ssctl-instance-id = <0x14>;
+		qcom,firmware-name = "adsp";
+		memory-region = <&pil_adsp_mem>;
+		qcom,complete-ramdump;
+
+		/* Inputs from lpass */
+		interrupts-extended = <&pdc 96 IRQ_TYPE_LEVEL_HIGH>,
+				<&adsp_smp2p_in 0 0>,
+				<&adsp_smp2p_in 2 0>,
+				<&adsp_smp2p_in 1 0>,
+				<&adsp_smp2p_in 3 0>;
+
+		interrupt-names = "qcom,wdog",
+				"qcom,err-fatal",
+				"qcom,proxy-unvote",
+				"qcom,err-ready",
+				"qcom,stop-ack";
+
+		/* Outputs to lpass */
+		qcom,smem-states = <&adsp_smp2p_out 0>;
+		qcom,smem-state-names = "qcom,force-stop";
+
+		mbox-names = "adsp-pil";
+	};
+
+	qcom,turing@8300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x8300000 0x100000>;
+
+		vdd_cx-supply = <&VDD_CX_LEVEL>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+		clocks = <&clock_rpmh RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <18>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <601>;
+		qcom,sysmon-id = <7>;
+		qcom,ssctl-instance-id = <0x17>;
+		qcom,firmware-name = "cdsp";
+		memory-region = <&pil_cdsp_mem>;
+		qcom,complete-ramdump;
+
+		qcom,msm-bus,name = "pil-cdsp";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
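+		/* Each vector is <master-id slave-id ab-KBps ib-KBps>;
+		 * case 0 is the inactive vote, case 1 the active vote.
+		 */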
+		qcom,msm-bus,vectors-KBps =
+			<154 10070 0 0>,
+			<154 10070 0 1>;
+
+		/* Inputs from turing */
+		interrupts = <GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+				<&cdsp_smp2p_in 0 0>,
+				<&cdsp_smp2p_in 2 0>,
+				<&cdsp_smp2p_in 1 0>,
+				<&cdsp_smp2p_in 3 0>;
+
+		interrupt-names = "qcom,wdog",
+				"qcom,err-fatal",
+				"qcom,proxy-unvote",
+				"qcom,err-ready",
+				"qcom,stop-ack";
+
+		/* Outputs to turing */
+		qcom,smem-states = <&cdsp_smp2p_out 0>;
+		qcom,smem-state-names = "qcom,force-stop";
+
+		mbox-names = "cdsp-pil";
+	};
 };
 
 #include "kona-bus.dtsi"
@@ -1154,3 +1280,6 @@
 #include "kona-pinctrl.dtsi"
 #include "kona-smp2p.dtsi"
 #include "kona-usb.dtsi"
+#include "kona-sde.dtsi"
+#include "kona-sde-pll.dtsi"
+#include "kona-sde-display.dtsi"
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 6a560f2..e18cfd6 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -66,6 +66,7 @@
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -206,6 +207,7 @@
 CONFIG_NET_ACT_MIRRED=y
 CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
 CONFIG_BT=y
 CONFIG_CFG80211=y
 CONFIG_RFKILL=y
@@ -347,6 +349,7 @@
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
+CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMP2P=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 232a762..f799b6e 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -67,6 +67,7 @@
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
@@ -212,6 +213,7 @@
 CONFIG_NET_ACT_SKBEDIT=y
 CONFIG_DNS_RESOLVER=y
 CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
 CONFIG_BT=y
 CONFIG_CFG80211=y
 # CONFIG_CFG80211_CRDA_SUPPORT is not set
@@ -359,6 +361,7 @@
 CONFIG_QCOM_IPCC=y
 CONFIG_QCOM_LLCC=y
 CONFIG_QCOM_KONA_LLCC=y
+CONFIG_QCOM_QMI_HELPERS=y
 CONFIG_QCOM_RPMH=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMP2P=y
@@ -375,6 +378,7 @@
 CONFIG_QCOM_GLINK=y
 CONFIG_QCOM_GLINK_PKT=y
 CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
 CONFIG_IIO=y
 CONFIG_PWM=y
 CONFIG_QCOM_PDC=y
@@ -390,7 +394,6 @@
 CONFIG_ESOC_MDM_4x=y
 CONFIG_ESOC_MDM_DRV=y
 CONFIG_ESOC_MDM_DBG_ENG=y
-CONFIG_MDM_DBG_REQ_ENG=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
index 8181b60..59f2bd8 100644
--- a/arch/arm64/include/asm/dma-iommu.h
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -40,6 +40,7 @@
 					struct dma_iommu_mapping *mapping);
 void arm_iommu_detach_device(struct device *dev);
 
+void arm_iommu_put_dma_cookie(struct iommu_domain *domain);
 #else  /* !CONFIG_ARM64_DMA_USE_IOMMU */
 
 static inline struct dma_iommu_mapping *
@@ -62,6 +63,7 @@
 {
 }
 
+static inline void arm_iommu_put_dma_cookie(struct iommu_domain *domain) {}
 #endif	/* CONFIG_ARM64_DMA_USE_IOMMU */
 
 #endif /* __KERNEL__ */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index c3e5b76..d35c8fa 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -594,6 +594,8 @@
 #else
 #define acpi_parse_and_init_cpus(...)	do { } while (0)
 #endif
+void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
 
 /*
  * Enumerate the possible CPU set from the device tree and build the
@@ -735,8 +737,6 @@
 	}
 }
 
-void (*__smp_cross_call)(const struct cpumask *, unsigned int);
-
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
 	__smp_cross_call = fn;
@@ -759,6 +759,17 @@
 	__smp_cross_call(target, ipinr);
 }
 
+static void smp_cross_call_common(const struct cpumask *cpumask,
+				  unsigned int func)
+{
+	unsigned int cpu;
+
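+	/*
+	 * Flag each target before raising the IPI so platform idle code
+	 * (e.g. the MSM low-power-mode driver) can see pending_ipi and
+	 * avoid power-collapsing a CPU that is about to be interrupted.
+	 */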
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	smp_cross_call(cpumask, func);
+}
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -786,18 +797,18 @@
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_CALL_FUNC);
+	smp_cross_call_common(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 #ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
 void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_WAKEUP);
+	smp_cross_call_common(mask, IPI_WAKEUP);
 }
 #endif
 
@@ -805,7 +816,8 @@
 void arch_irq_work_raise(void)
 {
 	if (__smp_cross_call)
-		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+		smp_cross_call_common(cpumask_of(smp_processor_id()),
+				      IPI_IRQ_WORK);
 }
 #endif
 
@@ -933,19 +945,20 @@
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
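+	/* The IPI has been handled on this CPU; clear the pending marker */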
+	per_cpu(pending_ipi, cpu) = false;
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
 	BUG_ON(cpu_is_offline(cpu));
-	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 void tick_broadcast(const struct cpumask *mask)
 {
-	smp_cross_call(mask, IPI_TIMER);
+	smp_cross_call_common(mask, IPI_TIMER);
 }
 #endif
 
@@ -961,7 +974,7 @@
 
 		if (system_state <= SYSTEM_RUNNING)
 			pr_crit("SMP: stopping secondary CPUs\n");
-		smp_cross_call(&mask, IPI_CPU_STOP);
+		smp_cross_call_common(&mask, IPI_CPU_STOP);
 	}
 
 	/* Wait up to one second for other CPUs to stop */
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 035b2af..ce6528b 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -36,6 +36,7 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/dma-iommu.h>
+#include <linux/of_address.h>
 #include <linux/dma-mapping-fast.h>
 #include <linux/msm_dma_iommu_mapping.h>
 #include <linux/arm-smmu-errata.h>
@@ -658,9 +659,6 @@
 	__dma_flush_area(virt, PAGE_SIZE);
 }
 
-static struct page **__atomic_get_pages(void *addr);
-static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs);
-
 static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 				 dma_addr_t *handle, gfp_t gfp,
 				 unsigned long attrs)
@@ -979,50 +977,14 @@
 }
 arch_initcall(__iommu_dma_init);
 
-static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				  const struct iommu_ops *ops)
-{
-	struct iommu_domain *domain;
-
-	if (!ops)
-		return;
-
-	/*
-	 * The IOMMU core code allocates the default DMA domain, which the
-	 * underlying IOMMU driver needs to support via the dma-iommu layer.
-	 */
-	domain = iommu_get_domain_for_dev(dev);
-
-	if (!domain)
-		goto out_err;
-
-	if (domain->type == IOMMU_DOMAIN_DMA) {
-		if (iommu_dma_init_domain(domain, dma_base, size, dev))
-			goto out_err;
-
-		dev->dma_ops = &iommu_dma_ops;
-	}
-
-	return;
-
-out_err:
-	 pr_debug("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
-		 dev_name(dev));
-}
-
 void arch_teardown_dma_ops(struct device *dev)
 {
 	dev->dma_ops = NULL;
 }
-
-#else
-
-static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				  const struct iommu_ops *iommu)
-{ }
-
 #endif  /* CONFIG_IOMMU_DMA */
 
+static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
+
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
@@ -1030,7 +992,7 @@
 		dev->dma_ops = &arm64_swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
-	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
+	arm_iommu_setup_dma_ops(dev, dma_base, size);
 
 #ifdef CONFIG_XEN
 	if (xen_initial_domain()) {
@@ -1043,831 +1005,146 @@
 
 #ifdef CONFIG_ARM64_DMA_USE_IOMMU
 
-static int __get_iommu_pgprot(unsigned long attrs, int prot,
-			      bool coherent)
-{
-	if (!(attrs & DMA_ATTR_EXEC_MAPPING))
-		prot |= IOMMU_NOEXEC;
-	if (attrs & DMA_ATTR_IOMMU_USE_UPSTREAM_HINT)
-		prot |= IOMMU_USE_UPSTREAM_HINT;
-	if (attrs & DMA_ATTR_IOMMU_USE_LLC_NWA)
-		prot |= IOMMU_USE_LLC_NWA;
-	if (coherent)
-		prot |= IOMMU_CACHE;
+/* guards initialization of default_domain->iova_cookie */
+static DEFINE_MUTEX(iommu_dma_init_mutex);
 
-	return prot;
-}
-
-/*
- * Make an area consistent for devices.
- * Note: Drivers should NOT use this function directly, as it will break
- * platforms with CONFIG_DMABOUNCE.
- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
- */
-static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
+static int
+iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
 {
-	__dma_map_area(page_address(page) + off, size, dir);
-}
-
-static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	__dma_unmap_area(page_address(page) + off, size, dir);
+	struct iommu_domain *domain = mapping->domain;
+	dma_addr_t dma_base = mapping->base;
+	u64 size = mapping->bits << PAGE_SHIFT;
+	int ret;
+	bool own_cookie;
 
 	/*
-	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 * If own_cookie is false, we are sharing the iova_cookie with
+	 * another driver, and should not free it on error. Cleanup will be
+	 * done when the iommu_domain is freed.
 	 */
-	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
-		set_bit(PG_dcache_clean, &page->flags);
-}
+	own_cookie = !domain->iova_cookie;
 
-/* IOMMU */
-
-static void __dma_clear_buffer(struct page *page, size_t size,
-			       unsigned long attrs, bool is_coherent)
-{
-	/*
-	 * Ensure that the allocated pages are zeroed, and that any data
-	 * lurking in the kernel direct-mapped region is invalidated.
-	 */
-	void *ptr = page_address(page);
-
-	if (!(attrs & DMA_ATTR_SKIP_ZEROING))
-		memset(ptr, 0, size);
-	if (!is_coherent)
-		__dma_flush_area(ptr, size);
-}
-
-static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
-				      size_t size)
-{
-	unsigned int order;
-	unsigned int align = 0;
-	unsigned int count, start;
-	unsigned long flags;
-	dma_addr_t iova;
-	size_t guard_len;
-
-	size = PAGE_ALIGN(size);
-	if (mapping->min_iova_align)
-		guard_len = ALIGN(size, mapping->min_iova_align) - size;
-	else
-		guard_len = 0;
-
-	order = get_order(size + guard_len);
-	if (order > CONFIG_ARM64_DMA_IOMMU_ALIGNMENT)
-		order = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT;
-
-	count = PAGE_ALIGN(size + guard_len) >> PAGE_SHIFT;
-	align = (1 << order) - 1;
-
-	spin_lock_irqsave(&mapping->lock, flags);
-	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
-					   count, align);
-	if (start > mapping->bits) {
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
-	}
-
-	bitmap_set(mapping->bitmap, start, count);
-	spin_unlock_irqrestore(&mapping->lock, flags);
-
-	iova = mapping->base + (start << PAGE_SHIFT);
-
-	if (guard_len &&
-		iommu_map(mapping->domain, iova + size,
-			page_to_phys(mapping->guard_page),
-			guard_len, ARM_SMMU_GUARD_PROT)) {
-
-		spin_lock_irqsave(&mapping->lock, flags);
-		bitmap_clear(mapping->bitmap, start, count);
-		spin_unlock_irqrestore(&mapping->lock, flags);
-		return DMA_ERROR_CODE;
-	}
-
-	return iova;
-}
-
-static inline void __free_iova(struct dma_iommu_mapping *mapping,
-			       dma_addr_t addr, size_t size)
-{
-	unsigned int start;
-	unsigned int count;
-	unsigned long flags;
-	size_t guard_len;
-
-	addr = addr & PAGE_MASK;
-	size = PAGE_ALIGN(size);
-	if (mapping->min_iova_align) {
-		guard_len = ALIGN(size, mapping->min_iova_align) - size;
-		iommu_unmap(mapping->domain, addr + size, guard_len);
-	} else {
-		guard_len = 0;
-	}
-
-	start = (addr - mapping->base) >> PAGE_SHIFT;
-	count = (size + guard_len) >> PAGE_SHIFT;
-	spin_lock_irqsave(&mapping->lock, flags);
-	bitmap_clear(mapping->bitmap, start, count);
-	spin_unlock_irqrestore(&mapping->lock, flags);
-}
-
-static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
-					  gfp_t gfp, unsigned long attrs)
-{
-	struct page **pages;
-	size_t count = size >> PAGE_SHIFT;
-	size_t array_size = count * sizeof(struct page *);
-	int i = 0;
-	bool is_coherent = is_dma_coherent(dev, attrs);
-
-	if (array_size <= PAGE_SIZE)
-		pages = kzalloc(array_size, gfp);
-	else
-		pages = vzalloc(array_size);
-	if (!pages)
-		return NULL;
-
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-		unsigned long order = get_order(size);
-		struct page *page;
-
-		page = dma_alloc_from_contiguous(dev, count, order, GFP_KERNEL);
-		if (!page)
-			goto error;
-
-		__dma_clear_buffer(page, size, attrs, is_coherent);
-
-		for (i = 0; i < count; i++)
-			pages[i] = page + i;
-
-		return pages;
-	}
-
-	/*
-	 * IOMMU can map any pages, so himem can also be used here
-	 */
-	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
-
-	while (count) {
-		int j, order = __fls(count);
-
-		pages[i] = alloc_pages(gfp, order);
-		while (!pages[i] && order)
-			pages[i] = alloc_pages(gfp, --order);
-		if (!pages[i])
-			goto error;
-
-		if (order) {
-			split_page(pages[i], order);
-			j = 1 << order;
-			while (--j)
-				pages[i + j] = pages[i] + j;
-		}
-
-		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs,
-				   is_coherent);
-		i += 1 << order;
-		count -= 1 << order;
-	}
-
-	return pages;
-error:
-	while (i--)
-		if (pages[i])
-			__free_pages(pages[i], 0);
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
-	return NULL;
-}
-
-static int __iommu_free_buffer(struct device *dev, struct page **pages,
-			       size_t size, unsigned long attrs)
-{
-	int count = size >> PAGE_SHIFT;
-	int array_size = count * sizeof(struct page *);
-	int i;
-
-	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-		dma_release_from_contiguous(dev, pages[0], count);
-	} else {
-		for (i = 0; i < count; i++)
-			if (pages[i])
-				__free_pages(pages[i], 0);
-	}
-
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
-	return 0;
-}
-
-/*
- * Create a CPU mapping for a specified pages
- */
-static void *
-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
-		    const void *caller)
-{
-	return dma_common_pages_remap(pages, size, VM_USERMAP, prot, caller);
-}
-
-/*
- * Create a mapping in device IO address space for specified pages
- */
-static dma_addr_t __iommu_create_mapping(struct device *dev,
-					struct page **pages, size_t size,
-					unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	dma_addr_t dma_addr, iova;
-	int i, ret;
-	int prot = IOMMU_READ | IOMMU_WRITE;
-
-	dma_addr = __alloc_iova(mapping, size);
-	if (dma_addr == DMA_ERROR_CODE)
-		return dma_addr;
-
-	prot = __get_iommu_pgprot(attrs, prot,
-				  is_dma_coherent(dev, attrs));
-
-	iova = dma_addr;
-	for (i = 0; i < count; ) {
-		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
-		phys_addr_t phys = page_to_phys(pages[i]);
-		unsigned int len, j;
-
-		for (j = i + 1; j < count; j++, next_pfn++)
-			if (page_to_pfn(pages[j]) != next_pfn)
-				break;
-
-		len = (j - i) << PAGE_SHIFT;
-		ret = iommu_map(mapping->domain, iova, phys, len, prot);
-		if (ret < 0)
-			goto fail;
-		iova += len;
-		i = j;
-	}
-	return dma_addr;
-fail:
-	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
-	__free_iova(mapping, dma_addr, size);
-	return DMA_ERROR_CODE;
-}
-
-static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
-				size_t size)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-
-	/*
-	 * add optional in-page offset from iova to size and align
-	 * result to page size
-	 */
-	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
-	iova &= PAGE_MASK;
-
-	iommu_unmap(mapping->domain, iova, size);
-	__free_iova(mapping, iova, size);
-	return 0;
-}
-
-static struct page **__atomic_get_pages(void *addr)
-{
-	struct page *page;
-	phys_addr_t phys;
-
-	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
-	page = phys_to_page(phys);
-
-	return (struct page **)page;
-}
-
-static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
-{
-	struct vm_struct *area;
-
-	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
-		return __atomic_get_pages(cpu_addr);
-
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-		return cpu_addr;
-
-	area = find_vm_area(cpu_addr);
-	if (area)
-		return area->pages;
-	return NULL;
-}
-
-static void *__iommu_alloc_atomic(struct device *dev, size_t size,
-				  dma_addr_t *handle, gfp_t gfp,
-				  unsigned long attrs)
-{
-	struct page *page;
-	struct page **pages;
-	size_t count = size >> PAGE_SHIFT;
-	size_t array_size = count * sizeof(struct page *);
-	int i;
-	void *addr;
-	bool coherent = is_dma_coherent(dev, attrs);
-
-	if (array_size <= PAGE_SIZE)
-		pages = kzalloc(array_size, gfp);
-	else
-		pages = vzalloc(array_size);
-
-	if (!pages)
-		return NULL;
-
-	if (coherent) {
-		page = alloc_pages(gfp, get_order(size));
-		addr = page ? page_address(page) : NULL;
-	} else {
-		addr = __alloc_from_pool(size, &page, gfp);
-	}
-
-	if (!addr)
-		goto err_free;
-
-	for (i = 0; i < count ; i++)
-		pages[i] = page + i;
-
-	*handle = __iommu_create_mapping(dev, pages, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
-		goto err_mapping;
-
-	kvfree(pages);
-	return addr;
-
-err_mapping:
-	if (coherent)
-		__free_pages(page, get_order(size));
-	else
-		__free_from_pool(addr, size);
-err_free:
-	kvfree(pages);
-	return NULL;
-}
-
-static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
-				dma_addr_t handle, size_t size)
-{
-	__iommu_remove_mapping(dev, handle, size);
-	__free_from_pool(cpu_addr, size);
-}
-
-static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
-	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
-{
-	bool coherent = is_dma_coherent(dev, attrs);
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-	struct page **pages;
-	void *addr = NULL;
-
-	*handle = DMA_ERROR_CODE;
-	size = PAGE_ALIGN(size);
-
-	if (!gfpflags_allow_blocking(gfp))
-		return __iommu_alloc_atomic(dev, size, handle, gfp, attrs);
-
-	/*
-	 * Following is a work-around (a.k.a. hack) to prevent pages
-	 * with __GFP_COMP being passed to split_page() which cannot
-	 * handle them.  The real problem is that this flag probably
-	 * should be 0 on ARM as it is not supported on this
-	 * platform; see CONFIG_HUGETLBFS.
-	 */
-	gfp &= ~(__GFP_COMP);
-
-	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
-	if (!pages)
-		return NULL;
-
-	*handle = __iommu_create_mapping(dev, pages, size, attrs);
-	if (*handle == DMA_ERROR_CODE)
-		goto err_buffer;
-
-	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
-		return pages;
-
-	addr = __iommu_alloc_remap(pages, size, gfp, prot,
-				   __builtin_return_address(0));
-	if (!addr)
-		goto err_mapping;
-
-	return addr;
-
-err_mapping:
-	__iommu_remove_mapping(dev, *handle, size);
-err_buffer:
-	__iommu_free_buffer(dev, pages, size, attrs);
-	return NULL;
-}
-
-static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
-		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		    unsigned long attrs)
-{
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
-	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
-	bool coherent = is_dma_coherent(dev, attrs);
-
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     coherent);
-
-	if (!pages)
-		return -ENXIO;
-
-	do {
-		int ret = vm_insert_page(vma, uaddr, *pages++);
-
+	if (own_cookie) {
+		ret = iommu_get_dma_cookie(domain);
 		if (ret) {
-			pr_err("Remapping memory failed: %d\n", ret);
+			dev_err(dev, "iommu_get_dma_cookie failed: %d\n", ret);
 			return ret;
 		}
-		uaddr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
+	}
 
+	ret = iommu_dma_init_domain(domain, dma_base, size, dev);
+	if (ret) {
+		dev_err(dev, "iommu_dma_init_domain failed: %d\n", ret);
+		if (own_cookie)
+			iommu_put_dma_cookie(domain);
+		return ret;
+	}
+
+	mapping->ops = &iommu_dma_ops;
 	return 0;
 }
 
+static int arm_iommu_get_dma_cookie(struct device *dev,
+				    struct dma_iommu_mapping *mapping)
+{
+	int s1_bypass = 0, is_fast = 0;
+	int err = 0;
+
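+	/*
+	 * S1-bypass domains fall back to swiotlb, "fast" domains use the
+	 * fastmap allocator, and everything else gets the generic
+	 * iommu-dma cookie. The mutex serializes cookie setup for devices
+	 * sharing an iommu_group.
+	 */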
+	mutex_lock(&iommu_dma_init_mutex);
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+
+	if (s1_bypass)
+		mapping->ops = &swiotlb_dma_ops;
+	else if (is_fast)
+		err = fast_smmu_init_mapping(dev, mapping);
+	else
+		err = iommu_init_mapping(dev, mapping);
+
+	mutex_unlock(&iommu_dma_init_mutex);
+	return err;
+}
+
+void arm_iommu_put_dma_cookie(struct iommu_domain *domain)
+{
+	int s1_bypass = 0, is_fast = 0;
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &is_fast);
+
+	if (is_fast)
+		fast_smmu_put_dma_cookie(domain);
+	else if (!s1_bypass)
+		iommu_put_dma_cookie(domain);
+}
+
 /*
- * free a page as defined by the above mapping.
- * Must not be called with IRQs disabled.
+ * Checks for a "dma-ranges" property in the node referenced by the
+ * qcom,iommu-group phandle; see devicetree/booting-without-of.txt for
+ * the property definition. If the property is absent, the defaults
+ * are kept: dma_addr = 0, dma_size = 1 << 32.
  */
-void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
-			  dma_addr_t handle, unsigned long attrs)
+static void arm_iommu_get_dma_window(struct device *dev, u64 *dma_addr,
+					u64 *dma_size)
 {
-	struct page **pages;
+	struct device_node *np;
+	u64 addr, phys, size;
+	int ret;
 
-	size = PAGE_ALIGN(size);
+	if (!dev->of_node)
+		return;
 
-	if (__in_atomic_pool(cpu_addr, size)) {
-		__iommu_free_atomic(dev, cpu_addr, handle, size);
+	np = of_parse_phandle(dev->of_node, "qcom,iommu-group", 0);
+	if (!np)
+		return;
+
+	ret = of_dma_get_range(np, &addr, &phys, &size);
+	of_node_put(np);
+	if (!ret) {
+		*dma_addr = addr;
+		*dma_size = size;
+	}
+}
+
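+/*
+ * Called from arch_setup_dma_ops(); installs DMA ops for devices that
+ * belong to an iommu_group, honouring any "dma-ranges" window found
+ * via qcom,iommu-group. Devices without a group keep the default ops.
+ */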
+static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+{
+	struct iommu_domain *domain;
+	struct iommu_group *group;
+	struct dma_iommu_mapping mapping = {0};
+
+	group = dev->iommu_group;
+	if (!group)
+		return;
+
+	arm_iommu_get_dma_window(dev, &dma_base, &size);
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain)
+		return;
+
+	/* Allow iommu-debug to call arch_setup_dma_ops to reconfigure itself */
+	if (domain->type != IOMMU_DOMAIN_DMA &&
+	    !of_device_is_compatible(dev->of_node, "iommu-debug-test")) {
+		dev_err(dev, "Invalid iommu domain type!\n");
 		return;
 	}
 
-	pages = __iommu_get_pages(cpu_addr, attrs);
-	if (!pages) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+	mapping.base = dma_base;
+	mapping.bits = size >> PAGE_SHIFT;
+	mapping.domain = domain;
+
+	if (arm_iommu_get_dma_cookie(dev, &mapping)) {
+		dev_err(dev, "Failed to get dma cookie\n");
 		return;
 	}
 
-	if (!(attrs & DMA_ATTR_NO_KERNEL_MAPPING))
-		dma_common_free_remap(cpu_addr, size, VM_USERMAP, true);
-
-	__iommu_remove_mapping(dev, handle, size);
-	__iommu_free_buffer(dev, pages, size, attrs);
-}
-
-int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
-				 void *cpu_addr, dma_addr_t dma_addr,
-				 size_t size, unsigned long attrs)
-{
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
-
-	if (!pages)
-		return -ENXIO;
-
-	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
-					 GFP_KERNEL);
-}
-
-static int __dma_direction_to_prot(enum dma_data_direction dir)
-{
-	int prot;
-
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		prot = IOMMU_READ | IOMMU_WRITE;
-		break;
-	case DMA_TO_DEVICE:
-		prot = IOMMU_READ;
-		break;
-	case DMA_FROM_DEVICE:
-		prot = IOMMU_WRITE;
-		break;
-	default:
-		prot = 0;
-	}
-
-	return prot;
+	set_dma_ops(dev, mapping.ops);
 }
 
 /**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
- */
-int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	struct scatterlist *s;
-	int ret, i;
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	unsigned int total_length = 0, current_offset = 0;
-	dma_addr_t iova;
-	int prot = __dma_direction_to_prot(dir);
-
-	for_each_sg(sg, s, nents, i)
-		total_length += s->length;
-
-	iova = __alloc_iova(mapping, total_length);
-	if (iova == DMA_ERROR_CODE) {
-		dev_err(dev, "Couldn't allocate iova for sg %p\n", sg);
-		return 0;
-	}
-	prot = __get_iommu_pgprot(attrs, prot,
-				  is_dma_coherent(dev, attrs));
-
-	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
-	if (ret != total_length) {
-		__free_iova(mapping, iova, total_length);
-		return 0;
-	}
-
-	for_each_sg(sg, s, nents, i) {
-		s->dma_address = iova + current_offset;
-		s->dma_length = total_length - current_offset;
-		current_offset += s->length;
-	}
-
-	return nents;
-}
-
-/**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations.  Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	unsigned int total_length = sg_dma_len(sg);
-	dma_addr_t iova = sg_dma_address(sg);
-
-	total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
-	iova &= PAGE_MASK;
-
-	iommu_unmap(mapping->domain, iova, total_length);
-	__free_iova(mapping, iova, total_length);
-}
-
-/**
- * arm_iommu_sync_sg_for_cpu
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map (returned from dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- */
-void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
-			int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *s;
-	int i;
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t iova = sg_dma_address(sg);
-	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
-
-	if (iova_coherent)
-		return;
-
-	for_each_sg(sg, s, nents, i)
-		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
-
-}
-
-/**
- * arm_iommu_sync_sg_for_device
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map (returned from dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- */
-void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-			int nents, enum dma_data_direction dir)
-{
-	struct scatterlist *s;
-	int i;
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t iova = sg_dma_address(sg);
-	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, iova);
-
-	if (iova_coherent)
-		return;
-
-	for_each_sg(sg, s, nents, i)
-		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
-}
-
-
-/**
- * arm_coherent_iommu_map_page
- * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * Coherent IOMMU aware version of arm_dma_map_page()
- */
-static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
-	     struct page *page, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t dma_addr;
-	int ret, prot, len, start_offset, map_offset;
-
-	map_offset = offset & ~PAGE_MASK;
-	start_offset = offset & PAGE_MASK;
-	len = PAGE_ALIGN(map_offset + size);
-
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
-		return dma_addr;
-
-	prot = __dma_direction_to_prot(dir);
-	prot = __get_iommu_pgprot(attrs, prot,
-				  is_dma_coherent(dev, attrs));
-
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
-			start_offset, len, prot);
-	if (ret < 0)
-		goto fail;
-
-	return dma_addr + map_offset;
-fail:
-	__free_iova(mapping, dma_addr, len);
-	return DMA_ERROR_CODE;
-}
-
-/**
- * arm_iommu_map_page
- * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * IOMMU aware version of arm_dma_map_page()
- */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     unsigned long attrs)
-{
-	if (!is_dma_coherent(dev, attrs) &&
-	      !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		__dma_page_cpu_to_dev(page, offset, size, dir);
-
-	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
-}
-
-/**
- * arm_iommu_unmap_page
- * @dev: valid struct device pointer
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * IOMMU aware version of arm_dma_unmap_page()
- */
-static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(
-						mapping->domain, iova));
-	int offset = handle & ~PAGE_MASK;
-	int len = PAGE_ALIGN(size + offset);
-
-	if (!(is_dma_coherent(dev, attrs) ||
-	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
-		__dma_page_dev_to_cpu(page, offset, size, dir);
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
-static void arm_iommu_sync_single_for_cpu(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(
-						mapping->domain, iova));
-	unsigned int offset = handle & ~PAGE_MASK;
-	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
-
-	if (!iova_coherent)
-		__dma_page_dev_to_cpu(page, offset, size, dir);
-}
-
-static void arm_iommu_sync_single_for_device(struct device *dev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(
-						mapping->domain, iova));
-	unsigned int offset = handle & ~PAGE_MASK;
-	bool iova_coherent = iommu_is_iova_coherent(mapping->domain, handle);
-
-	if (!iova_coherent)
-		__dma_page_cpu_to_dev(page, offset, size, dir);
-}
-
-static dma_addr_t arm_iommu_dma_map_resource(
-			struct device *dev, phys_addr_t phys_addr,
-			size_t size, enum dma_data_direction dir,
-			unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	size_t offset = phys_addr & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-	dma_addr_t dma_addr;
-	int prot;
-
-	dma_addr = __alloc_iova(mapping, len);
-	if (dma_addr == DMA_ERROR_CODE)
-		return dma_addr;
-
-	prot = __dma_direction_to_prot(dir);
-	prot |= IOMMU_MMIO;
-
-	if (iommu_map(mapping->domain, dma_addr, phys_addr - offset,
-			len, prot)) {
-		__free_iova(mapping, dma_addr, len);
-		return DMA_ERROR_CODE;
-	}
-	return dma_addr + offset;
-}
-
-static void arm_iommu_dma_unmap_resource(
-			struct device *dev, dma_addr_t addr,
-			size_t size, enum dma_data_direction dir,
-			unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-	size_t offset = addr & ~PAGE_MASK;
-	size_t len = PAGE_ALIGN(size + offset);
-
-	iommu_unmap(mapping->domain, addr - offset, len);
-	__free_iova(mapping, addr - offset, len);
-}
-
-static int arm_iommu_mapping_error(struct device *dev,
-				   dma_addr_t dma_addr)
-{
-	return dma_addr == DMA_ERROR_CODE;
-}
-
-const struct dma_map_ops iommu_ops = {
-	.alloc		= arm_iommu_alloc_attrs,
-	.free		= arm_iommu_free_attrs,
-	.mmap		= arm_iommu_mmap_attrs,
-	.get_sgtable	= arm_iommu_get_sgtable,
-
-	.map_page		= arm_iommu_map_page,
-	.unmap_page		= arm_iommu_unmap_page,
-	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
-	.sync_single_for_device	= arm_iommu_sync_single_for_device,
-
-	.map_sg			= arm_iommu_map_sg,
-	.unmap_sg		= arm_iommu_unmap_sg,
-	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
-	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
-
-	.map_resource		= arm_iommu_dma_map_resource,
-	.unmap_resource		= arm_iommu_dma_unmap_resource,
-
-	.mapping_error		= arm_iommu_mapping_error,
-};
-
-/**
+ * DEPRECATED
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)
  * @base: start address of the valid IO address space
@@ -1909,78 +1186,8 @@
 }
 EXPORT_SYMBOL(arm_iommu_create_mapping);
 
-static int
-iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
-{
-	struct iommu_domain *domain = mapping->domain;
-	dma_addr_t dma_base = mapping->base;
-	u64 size = mapping->bits << PAGE_SHIFT;
-
-	/* Prepare the domain */
-	if (iommu_get_dma_cookie(domain))
-		return -EINVAL;
-
-	if (iommu_dma_init_domain(domain, dma_base, size, dev))
-		goto out_put_cookie;
-
-	mapping->ops = &iommu_dma_ops;
-	return 0;
-
-out_put_cookie:
-	iommu_put_dma_cookie(domain);
-	return -EINVAL;
-}
-
-static int
-bitmap_iommu_init_mapping(struct device *dev, struct dma_iommu_mapping *mapping)
-{
-	unsigned int bitmap_size = BITS_TO_LONGS(mapping->bits) * sizeof(long);
-	int vmid = VMID_HLOS;
-	int min_iova_align = 0;
-
-	iommu_domain_get_attr(mapping->domain,
-			DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN,
-			&min_iova_align);
-	iommu_domain_get_attr(mapping->domain,
-			DOMAIN_ATTR_SECURE_VMID, &vmid);
-	if (vmid >= VMID_LAST || vmid < 0)
-		vmid = VMID_HLOS;
-
-	if (min_iova_align) {
-		mapping->min_iova_align = ARM_SMMU_MIN_IOVA_ALIGN;
-		mapping->guard_page = arm_smmu_errata_get_guard_page(vmid);
-		if (!mapping->guard_page)
-			return -ENOMEM;
-	}
-
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
-				__GFP_NORETRY);
-	if (!mapping->bitmap)
-		mapping->bitmap = vzalloc(bitmap_size);
-
-	if (!mapping->bitmap)
-		return -ENOMEM;
-
-	spin_lock_init(&mapping->lock);
-	mapping->ops = &iommu_ops;
-	return 0;
-}
-
-static void release_iommu_mapping(struct kref *kref)
-{
-	int is_bitmap = 0;
-	struct dma_iommu_mapping *mapping =
-		container_of(kref, struct dma_iommu_mapping, kref);
-
-	iommu_domain_get_attr(mapping->domain,
-				DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR, &is_bitmap);
-	if (is_bitmap)
-		kfree(mapping->bitmap);
-	iommu_domain_free(mapping->domain);
-	kfree(mapping);
-}
-
 /*
+ * DEPRECATED
  * arm_iommu_release_mapping
  * @mapping: allocted via arm_iommu_create_mapping()
  *
@@ -1989,71 +1196,18 @@
  */
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
 {
-	int is_fast = 0;
-	void (*release)(struct kref *kref);
-
 	if (!mapping)
 		return;
 
-	if (!mapping->init) {
+	if (mapping->domain)
 		iommu_domain_free(mapping->domain);
-		kfree(mapping);
-		return;
-	}
 
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
-
-	if (is_fast)
-		release = fast_smmu_release_mapping;
-	else
-		release = release_iommu_mapping;
-
-	kref_put(&mapping->kref, release);
+	kfree(mapping);
 }
 EXPORT_SYMBOL(arm_iommu_release_mapping);
 
-static int arm_iommu_init_mapping(struct device *dev,
-			    struct dma_iommu_mapping *mapping)
-{
-	int err = -EINVAL;
-	int s1_bypass = 0, is_fast = 0, is_bitmap = 0;
-	dma_addr_t iova_end;
-
-	if (mapping->init) {
-		kref_get(&mapping->kref);
-		return 0;
-	}
-
-	iova_end = mapping->base + (mapping->bits << PAGE_SHIFT) - 1;
-	if (iova_end > dma_get_mask(dev)) {
-		dev_err(dev, "dma mask %llx too small for requested iova range %pad to %pad\n",
-			dma_get_mask(dev), &mapping->base, &iova_end);
-		return -EINVAL;
-	}
-
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
-					&s1_bypass);
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
-	iommu_domain_get_attr(mapping->domain,
-			DOMAIN_ATTR_BITMAP_IOVA_ALLOCATOR, &is_bitmap);
-
-	if (s1_bypass) {
-		mapping->ops = &swiotlb_dma_ops;
-		err = 0;
-	} else if (is_fast)
-		err = fast_smmu_init_mapping(dev, mapping);
-	else if (is_bitmap)
-		err = bitmap_iommu_init_mapping(dev, mapping);
-	else
-		err = iommu_init_mapping(dev, mapping);
-	if (!err) {
-		kref_init(&mapping->kref);
-		mapping->init = true;
-	}
-	return err;
-}
-
 /**
+ * DEPRECATED
  * arm_iommu_attach_device
  * @dev: valid struct device pointer
  * @mapping: io address space mapping structure (returned from
@@ -2063,15 +1217,13 @@
  * this replaces the dma operations (dma_map_ops pointer) with the
  * IOMMU aware version.
  *
- * Clients are expected to call arm_iommu_attach_device() prior to sharing
- * the dma_iommu_mapping structure with another device. This ensures
- * initialization is complete.
+ * Only configures dma_ops for a single device in the iommu_group.
  */
 int arm_iommu_attach_device(struct device *dev,
 			    struct dma_iommu_mapping *mapping)
 {
 	int err;
-	struct iommu_domain *domain = mapping->domain;
+	struct iommu_domain *domain;
 	struct iommu_group *group = dev->iommu_group;
 
 	if (!group) {
@@ -2079,17 +1231,30 @@
 		return -EINVAL;
 	}
 
-	if (iommu_get_domain_for_dev(dev)) {
-		dev_err(dev, "Device already attached to other iommu_domain\n");
-		return -EINVAL;
+	domain = iommu_get_domain_for_dev(dev);
+	if (domain) {
+		int dynamic = 0;
+
+		iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
+
+		if ((domain->type == IOMMU_DOMAIN_DMA) && dynamic) {
+			dev_warn(dev, "Deprecated API %s in use! Continuing anyway\n",
+				__func__);
+		} else {
+			dev_err(dev, "Device already attached to other iommu_domain\n");
+			return -EINVAL;
+		}
 	}
 
 	err = iommu_attach_group(mapping->domain, group);
-	if (err)
-		return err;
-
-	err = arm_iommu_init_mapping(dev, mapping);
 	if (err) {
+		dev_err(dev, "iommu_attach_group failed\n");
+		return err;
+	}
+
+	err = arm_iommu_get_dma_cookie(dev, mapping);
+	if (err) {
+		dev_err(dev, "arm_iommu_get_dma_cookie failed\n");
-		iommu_detach_group(domain, group);
+		iommu_detach_group(mapping->domain, group);
 		return err;
 	}
@@ -2103,6 +1268,7 @@
 EXPORT_SYMBOL(arm_iommu_attach_device);
 
 /**
+ * DEPRECATED
  * arm_iommu_detach_device
  * @dev: valid struct device pointer
  *
@@ -2111,22 +1277,21 @@
  */
 void arm_iommu_detach_device(struct device *dev)
 {
-	struct dma_iommu_mapping *mapping;
+	struct iommu_domain *domain;
 	int s1_bypass = 0;
 
-	mapping = to_dma_iommu_mapping(dev);
-	if (!mapping) {
-		dev_warn(dev, "Not attached\n");
-		return;
-	}
-
 	if (!dev->iommu_group) {
 		dev_err(dev, "No iommu associated with device\n");
 		return;
 	}
 
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
-					&s1_bypass);
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain) {
+		dev_warn(dev, "Not attached\n");
+		return;
+	}
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
 
 	/*
 	 * ION defers dma_unmap calls. Ensure they have all completed prior to
@@ -2135,7 +1300,7 @@
 	if (msm_dma_unmap_all_for_dev(dev))
 		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
 
-	iommu_detach_group(mapping->domain, dev->iommu_group);
+	iommu_detach_group(domain, dev->iommu_group);
 	dev->archdata.mapping = NULL;
 	if (!s1_bypass)
 		set_dma_ops(dev, NULL);
@@ -2144,4 +1309,10 @@
 }
 EXPORT_SYMBOL(arm_iommu_detach_device);
 
+#else /* !CONFIG_ARM64_DMA_USE_IOMMU */
+
+static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
+{
+}
 #endif
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index da42423..57dae20 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -22,7 +22,7 @@
 };
 
 static const struct firmware_info firmware_table[] = {
-	{.dev_id = 0x306, .fw_image = "sbl1.mbn"},
+	{.dev_id = 0x306, .fw_image = "sdx55m/sbl1.mbn"},
 	{.dev_id = 0x305, .fw_image = "sdx50m/sbl1.mbn"},
 	{.dev_id = 0x304, .fw_image = "sbl.mbn", .edl_image = "edl.mbn"},
 	/* default, set to debug.mbn */
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index db57d4c..2a28173 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -1305,14 +1305,19 @@
 	bool auto_start = false;
 	int ret;
 
+	/* bring device out of lpm */
+	ret = mhi_device_get_sync(mhi_dev);
+	if (ret)
+		return ret;
 
+	ret = -EINVAL;
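+	/* from here on, any failure must drop the runtime vote via exit_probe */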
 	if (ul_chan) {
 		/* lpm notification require status_cb */
 		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
-			return -EINVAL;
+			goto exit_probe;
 
 		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
-			return -EINVAL;
+			goto exit_probe;
 
 		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
 		mhi_dev->status_cb = mhi_drv->status_cb;
@@ -1321,10 +1326,10 @@
 
 	if (dl_chan) {
 		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
-			return -EINVAL;
+			goto exit_probe;
 
 		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
-			return -EINVAL;
+			goto exit_probe;
 
 		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];
 
@@ -1334,7 +1339,7 @@
 		 * cb whenever there are pending data
 		 */
 		if (mhi_event->cl_manage && !mhi_drv->status_cb)
-			return -EINVAL;
+			goto exit_probe;
 
 		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
 
@@ -1348,6 +1353,9 @@
 	if (!ret && auto_start)
 		mhi_prepare_for_transfer(mhi_dev);
 
+exit_probe:
+	mhi_device_put(mhi_dev);
+
 	return ret;
 }
 
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
index 9879e31..c211d35 100644
--- a/drivers/bus/mhi/devices/mhi_uci.c
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -191,10 +191,10 @@
 		}
 	}
 
-	mutex_unlock(&uci_dev->mutex);
-
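+	/* read ref_count for the log while the mutex is still held */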
 	MSG_LOG("exit: ref_count:%d\n", uci_dev->ref_count);
 
+	mutex_unlock(&uci_dev->mutex);
+
 	return 0;
 }
 
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 3147c63..72b273a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -1,3 +1,5 @@
+source "drivers/clk/qcom/mdss/Kconfig"
+
 config QCOM_GDSC
 	bool
 	select PM_GENERIC_DOMAINS if PM
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 448844f..645e3d8 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -47,3 +47,5 @@
 obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
 obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
 obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
+
+obj-y += mdss/
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index ab51c4a..b092080 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1189,7 +1189,7 @@
 		(mode_regval & PLL_OUTCTRL));
 }
 
-int alpha_pll_lucid_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+int clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
 				const struct alpha_pll_config *config)
 {
 	int ret;
@@ -1213,7 +1213,10 @@
 	if (config->l)
 		regmap_write(regmap, PLL_L_VAL(pll), config->l);
 
-	regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL);
+	if (config->cal_l)
+		regmap_write(regmap, PLL_CAL_L_VAL(pll), config->cal_l);
+	else
+		regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL);
 
 	if (config->alpha)
 		regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
@@ -1229,9 +1232,29 @@
 		regmap_write(regmap, PLL_CONFIG_CTL_U1(pll),
 				config->config_ctl_hi1_val);
 
-	if (config->post_div_mask)
-		regmap_update_bits(regmap, PLL_USER_CTL(pll),
-			config->post_div_mask, config->post_div_val);
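+	/* Write the USER_CTL and TEST_CTL registers wholesale when the
+	 * platform supplies values, instead of masking in only the
+	 * post-divider bits.
+	 */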
+	if (config->user_ctl_val)
+		regmap_write(regmap, PLL_USER_CTL(pll),
+				config->user_ctl_val);
+
+	if (config->user_ctl_hi_val)
+		regmap_write(regmap, PLL_USER_CTL_U(pll),
+				config->user_ctl_hi_val);
+
+	if (config->user_ctl_hi1_val)
+		regmap_write(regmap, PLL_USER_CTL_U1(pll),
+				config->user_ctl_hi1_val);
+
+	if (config->test_ctl_val)
+		regmap_write(regmap, PLL_TEST_CTL(pll),
+				config->test_ctl_val);
+
+	if (config->test_ctl_hi_val)
+		regmap_write(regmap, PLL_TEST_CTL_U(pll),
+				config->test_ctl_hi_val);
+
+	if (config->test_ctl_hi1_val)
+		regmap_write(regmap, PLL_TEST_CTL_U1(pll),
+				config->test_ctl_hi1_val);
 
 	regmap_update_bits(regmap, PLL_MODE(pll),
 				 PLL_UPDATE_BYPASS,
@@ -1278,7 +1301,7 @@
 	}
 
 	if (unlikely(!pll->inited)) {
-		ret = alpha_pll_lucid_configure(pll, pll->clkr.regmap,
+		ret = clk_lucid_pll_configure(pll, pll->clkr.regmap,
 						pll->config);
 		if (ret) {
 			pr_err("Failed to configure %s\n", clk_hw_get_name(hw));
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 1eb611f..458c8b6 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -93,11 +93,18 @@
 
 struct alpha_pll_config {
 	u32 l;
+	u32 cal_l;
 	u32 alpha;
 	u32 alpha_hi;
+	u32 user_ctl_val;
+	u32 user_ctl_hi_val;
+	u32 user_ctl_hi1_val;
 	u32 config_ctl_val;
 	u32 config_ctl_hi_val;
 	u32 config_ctl_hi1_val;
+	u32 test_ctl_val;
+	u32 test_ctl_hi_val;
+	u32 test_ctl_hi1_val;
 	u32 main_output_mask;
 	u32 aux_output_mask;
 	u32 aux2_output_mask;
diff --git a/drivers/clk/qcom/mdss/Kconfig b/drivers/clk/qcom/mdss/Kconfig
new file mode 100644
index 0000000..e728448
--- /dev/null
+++ b/drivers/clk/qcom/mdss/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config QCOM_MDSS_PLL
+	bool "MDSS PLL programming"
+	depends on COMMON_CLK_QCOM
+	help
+	  Provides support for DSI, eDP and HDMI interface PLL programming on
+	  MDSS hardware. It also handles the PLL-specific resources and turns
+	  them on/off when an MDSS PLL client enables or disables PLL clocks.
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
new file mode 100644
index 0000000..276fcbe
--- /dev/null
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-10nm-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-7nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-7nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-7nm-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28lpm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-28nm-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-14nm-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dp-pll-14nm.o
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
new file mode 100644
index 0000000..02914fa
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm-util.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/usb/usbpd.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+#define DP_PHY_REVISION_ID0			0x0000
+#define DP_PHY_REVISION_ID1			0x0004
+#define DP_PHY_REVISION_ID2			0x0008
+#define DP_PHY_REVISION_ID3			0x000C
+
+#define DP_PHY_CFG				0x0010
+#define DP_PHY_PD_CTL				0x0018
+#define DP_PHY_MODE				0x001C
+
+#define DP_PHY_AUX_CFG0				0x0020
+#define DP_PHY_AUX_CFG1				0x0024
+#define DP_PHY_AUX_CFG2				0x0028
+#define DP_PHY_AUX_CFG3				0x002C
+#define DP_PHY_AUX_CFG4				0x0030
+#define DP_PHY_AUX_CFG5				0x0034
+#define DP_PHY_AUX_CFG6				0x0038
+#define DP_PHY_AUX_CFG7				0x003C
+#define DP_PHY_AUX_CFG8				0x0040
+#define DP_PHY_AUX_CFG9				0x0044
+#define DP_PHY_AUX_INTERRUPT_MASK		0x0048
+#define DP_PHY_AUX_INTERRUPT_CLEAR		0x004C
+#define DP_PHY_AUX_BIST_CFG			0x0050
+
+#define DP_PHY_VCO_DIV				0x0064
+#define DP_PHY_TX0_TX1_LANE_CTL			0x006C
+#define DP_PHY_TX2_TX3_LANE_CTL			0x0088
+
+#define DP_PHY_SPARE0				0x00AC
+#define DP_PHY_STATUS				0x00C0
+
+/* Tx registers */
+#define TXn_BIST_MODE_LANENO			0x0000
+#define TXn_CLKBUF_ENABLE			0x0008
+#define TXn_TX_EMP_POST1_LVL			0x000C
+
+#define TXn_TX_DRV_LVL				0x001C
+
+#define TXn_RESET_TSYNC_EN			0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
+#define TXn_TX_BAND				0x002C
+#define TXn_SLEW_CNTL				0x0030
+#define TXn_INTERFACE_SELECT			0x0034
+
+#define TXn_RES_CODE_LANE_TX			0x003C
+#define TXn_RES_CODE_LANE_RX			0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
+
+#define TXn_DEBUG_BUS_SEL			0x0058
+#define TXn_TRANSCEIVER_BIAS_EN			0x005C
+#define TXn_HIGHZ_DRVR_EN			0x0060
+#define TXn_TX_POL_INV				0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
+
+#define TXn_LANE_MODE_1				0x008C
+
+#define TXn_TRAN_DRVR_EMP_EN			0x00C0
+#define TXn_TX_INTERFACE_MODE			0x00C4
+
+#define TXn_VMODE_CTRL1				0x00F0
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1			0x0000
+#define QSERDES_COM_ATB_SEL2			0x0004
+#define QSERDES_COM_FREQ_UPDATE			0x0008
+#define QSERDES_COM_BG_TIMER			0x000C
+#define QSERDES_COM_SSC_EN_CENTER		0x0010
+#define QSERDES_COM_SSC_ADJ_PER1		0x0014
+#define QSERDES_COM_SSC_ADJ_PER2		0x0018
+#define QSERDES_COM_SSC_PER1			0x001C
+#define QSERDES_COM_SSC_PER2			0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
+#define QSERDES_COM_POST_DIV			0x002C
+#define QSERDES_COM_POST_DIV_MUX		0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
+#define QSERDES_COM_CLK_ENABLE1			0x0038
+#define QSERDES_COM_SYS_CLK_CTRL		0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
+#define QSERDES_COM_PLL_EN			0x0044
+#define QSERDES_COM_PLL_IVCO			0x0048
+#define QSERDES_COM_CMN_IETRIM			0x004C
+#define QSERDES_COM_CMN_IPTRIM			0x0050
+
+#define QSERDES_COM_CP_CTRL_MODE0		0x0060
+#define QSERDES_COM_CP_CTRL_MODE1		0x0064
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x0068
+#define QSERDES_COM_PLL_RCTRL_MODE1		0x006C
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x0070
+#define QSERDES_COM_PLL_CCTRL_MODE1		0x0074
+#define QSERDES_COM_PLL_CNTRL			0x0078
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM		0x007C
+#define QSERDES_COM_SYSCLK_EN_SEL		0x0080
+#define QSERDES_COM_CML_SYSCLK_SEL		0x0084
+#define QSERDES_COM_RESETSM_CNTRL		0x0088
+#define QSERDES_COM_RESETSM_CNTRL2		0x008C
+#define QSERDES_COM_LOCK_CMP_EN			0x0090
+#define QSERDES_COM_LOCK_CMP_CFG		0x0094
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x0098
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x009C
+#define QSERDES_COM_LOCK_CMP3_MODE0		0x00A0
+
+#define QSERDES_COM_DEC_START_MODE0		0x00B0
+#define QSERDES_COM_DEC_START_MODE1		0x00B4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00B8
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00BC
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00C0
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	0x00C4
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	0x00C8
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	0x00CC
+#define QSERDES_COM_INTEGLOOP_INITVAL		0x00D0
+#define QSERDES_COM_INTEGLOOP_EN		0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00D8
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00DC
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	0x00E0
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	0x00E4
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		0x00E8
+#define QSERDES_COM_VCO_TUNE_CTRL		0x00EC
+#define QSERDES_COM_VCO_TUNE_MAP		0x00F0
+
+#define QSERDES_COM_CMN_STATUS			0x0124
+#define QSERDES_COM_RESET_SM_STATUS		0x0128
+
+#define QSERDES_COM_CLK_SEL			0x0138
+#define QSERDES_COM_HSCLK_SEL			0x013C
+
+#define QSERDES_COM_CORECLK_DIV_MODE0		0x0148
+
+#define QSERDES_COM_SW_RESET			0x0150
+#define QSERDES_COM_CORE_CLK_EN			0x0154
+#define QSERDES_COM_C_READY_STATUS		0x0158
+#define QSERDES_COM_CMN_CONFIG			0x015C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0164
+
+#define DP_PHY_PLL_POLL_SLEEP_US		500
+#define DP_PHY_PLL_POLL_TIMEOUT_US		10000
+
+#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
+#define DP_VCO_RATE_9720MHZDIV1000		9720000UL
+#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
+
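+/*
+ * Bits [1:0] of DP_PHY_VCO_DIV select which divided VCO output feeds
+ * the auxiliary clock; map the clock framework's mux parent index onto
+ * the matching divider setting.
+ */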
+int dp_mux_set_parent_10nm(void *context, unsigned int reg, unsigned int val)
+{
+	struct mdss_pll_resources *dp_res = context;
+	int rc;
+	u32 auxclk_div;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= ~0x03;	/* bits 0 to 1 */
+
+	if (val == 0) /* mux parent index = 0 */
+		auxclk_div |= 1;
+	else if (val == 1) /* mux parent index = 1 */
+		auxclk_div |= 2;
+	else if (val == 2) /* mux parent index = 2 */
+		auxclk_div |= 0;
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_VCO_DIV, auxclk_div);
+	/* Make sure the PHY registers writes are done */
+	wmb();
+	pr_debug("%s: mux=%d auxclk_div=%x\n", __func__, val, auxclk_div);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	return 0;
+}
+
+int dp_mux_get_parent_10nm(void *context, unsigned int reg, unsigned int *val)
+{
+	int rc;
+	u32 auxclk_div = 0;
+	struct mdss_pll_resources *dp_res = context;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= 0x03;
+
+	if (auxclk_div == 1) /* Default divider */
+		*val = 0;
+	else if (auxclk_div == 2)
+		*val = 1;
+	else if (auxclk_div == 0)
+		*val = 2;
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	pr_debug("%s: auxclk_div=%d, val=%d\n", __func__, auxclk_div, *val);
+
+	return 0;
+}
+
+static int dp_vco_pll_init_db_10nm(struct dp_pll_db *pdb,
+		unsigned long rate)
+{
+	struct mdss_pll_resources *dp_res = pdb->pll;
+	u32 spare_value = 0;
+
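+	/* The DP driver stashes the lane count in the low nibble of the
+	 * spare register and the plug orientation in the high nibble.
+	 */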
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	pdb->lane_cnt = spare_value & 0x0F;
+	pdb->orientation = (spare_value & 0xF0) >> 4;
+
+	pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+			__func__, spare_value, pdb->lane_cnt, pdb->orientation);
+
+	switch (rate) {
+	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_9720MHZDIV1000);
+		pdb->hsclk_sel = 0x0c;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x6f;
+		pdb->lock_cmp2_mode0 = 0x08;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x04;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x0f;
+		pdb->lock_cmp2_mode0 = 0x0e;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x00;
+		pdb->dec_start_mode0 = 0x8c;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x00;
+		pdb->div_frac_start3_mode0 = 0x0a;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x1f;
+		pdb->lock_cmp2_mode0 = 0x1c;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x2;
+		pdb->lock_cmp_en = 0x00;
+		break;
+	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+		pr_debug("%s: VCO rate: %ld\n", __func__,
+				DP_VCO_RATE_8100MHZDIV1000);
+		pdb->hsclk_sel = 0x03;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->integloop_gain0_mode0 = 0x3f;
+		pdb->integloop_gain1_mode0 = 0x00;
+		pdb->vco_tune_map = 0x00;
+		pdb->lock_cmp1_mode0 = 0x2f;
+		pdb->lock_cmp2_mode0 = 0x2a;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x0;
+		pdb->lock_cmp_en = 0x08;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
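+
+/*
+ * Worked example for the table above (driver rates are in kHz): at
+ * DP_VCO_HSCLK_RATE_2700MHZDIV1000 the VCO runs at 10.8 GHz and
+ * hsclk_sel (0x04) picks the /4 divider for the 2.7 GHz lane rate; at
+ * DP_VCO_HSCLK_RATE_1620MHZDIV1000 it runs at 9.72 GHz with /6
+ * (hsclk_sel 0x0c). phy_vco_div is later programmed into DP_PHY_VCO_DIV
+ * by dp_config_vco_rate_10nm() to set the pixel-path divider.
+ */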
+
+static int dp_config_vco_rate_10nm(struct dp_pll_vco_clk *vco,
+		unsigned long rate)
+{
+	u32 res = 0;
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+
+	res = dp_vco_pll_init_db_10nm(pdb, rate);
+	if (res) {
+		pr_err("VCO Init DB failed\n");
+		return res;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC2)
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
+		else
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
+	}
+
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0e);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+	/* Different for each clock rates */
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x07);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	if (pdb->orientation == ORIENTATION_CC2)
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
+	else
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	/* TX Lane configuration */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+	/* TX-0 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
+
+	/* TX-1 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	/* dependent on the vco frequency */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+
+	return res;
+}
+
+static bool dp_10nm_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL lock status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: C_READY status is not high. Status=%x\n",
+				__func__, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static bool dp_10nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool phy_ready = true;
+
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & (BIT(1))) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("%s: Phy_ready is not high. Status=%x\n",
+				__func__, status);
+		phy_ready = false;
+	}
+
+	return phy_ready;
+}
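+
+/*
+ * Both status helpers above poll every DP_PHY_PLL_POLL_SLEEP_US (500 us)
+ * for at most DP_PHY_PLL_POLL_TIMEOUT_US (10 ms): PLL lock is bit 0 of
+ * QSERDES_COM_C_READY_STATUS, PHY ready is bit 1 of DP_PHY_STATUS.
+ */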
+
+static int dp_pll_enable_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+	u32 bias_en, drvr_en;
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x04);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
+	wmb(); /* Make sure the PHY register writes are done */
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	wmb();	/* Make sure the PLL register writes are done */
+
+	if (!dp_10nm_pll_lock_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	/* poll for PHY ready status */
+	if (!dp_10nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	pr_debug("%s: PLL is locked\n", __func__);
+
+	if (pdb->lane_cnt == 1) {
+		bias_en = 0x3e;
+		drvr_en = 0x13;
+	} else {
+		bias_en = 0x3f;
+		drvr_en = 0x10;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC1) {
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		} else {
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		}
+	} else {
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
+	udelay(2000);
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+
+	/*
+	 * Make sure all the register writes are completed before
+	 * doing any other operation
+	 */
+	wmb();
+
+	/* poll for PHY ready status */
+	if (!dp_10nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x38);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x38);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x20);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x20);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x06);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x07);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+lock_err:
+	return rc;
+}
+
+static int dp_pll_disable_10nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* Assert DP PHY power down */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
+	/*
+	 * Make sure all the register writes to disable PLL are
+	 * completed before doing any other operation
+	 */
+	wmb();
+
+	return 0;
+}
+
+
+int dp_vco_prepare_10nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	pr_debug("rate=%ld\n", vco->rate);
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll resources\n");
+		goto error;
+	}
+
+	if ((dp_res->vco_cached_rate != 0)
+		&& (dp_res->vco_cached_rate == vco->rate)) {
+		rc = vco->hw.init->ops->set_rate(hw,
+			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+				rc, dp_res->index);
+			mdss_pll_resource_enable(dp_res, false);
+			goto error;
+		}
+	}
+
+	rc = dp_pll_enable_10nm(hw);
+	if (rc) {
+		mdss_pll_resource_enable(dp_res, false);
+		pr_err("ndx=%d failed to enable dp pll\n",
+					dp_res->index);
+		goto error;
+	}
+
+	mdss_pll_resource_enable(dp_res, false);
+error:
+	return rc;
+}
+
+void dp_vco_unprepare_10nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	if (!dp_res) {
+		pr_err("Invalid input parameter\n");
+		return;
+	}
+
+	if (!dp_res->pll_on &&
+		mdss_pll_resource_enable(dp_res, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return;
+	}
+	dp_res->vco_cached_rate = vco->rate;
+	dp_pll_disable_10nm(hw);
+
+	dp_res->handoff_resources = false;
+	mdss_pll_resource_enable(dp_res, false);
+	dp_res->pll_on = false;
+}
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	int rc;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	pr_debug("DP lane CLK rate=%ld\n", rate);
+
+	rc = dp_config_vco_rate_10nm(vco, rate);
+	if (rc)
+		pr_err("%s: Failed to set clk rate\n", __func__);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	vco->rate = rate;
+
+	return 0;
+}
+
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	int rc;
+	u32 div, hsclk_div, link_clk_div = 0;
+	u64 vco_rate;
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
+		return 0;
+	}
+
+	div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+	div &= 0x0f;
+
+	if (div == 12) {
+		hsclk_div = 6; /* Default */
+	} else if (div == 4) {
+		hsclk_div = 4;
+	} else if (div == 0) {
+		hsclk_div = 2;
+	} else if (div == 3) {
+		hsclk_div = 1;
+	} else {
+		pr_debug("unknown divider. forcing to default\n");
+		hsclk_div = 5;
+	}
+
+	div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
+	div >>= 2;
+
+	if ((div & 0x3) == 0)
+		link_clk_div = 5;
+	else if ((div & 0x3) == 1)
+		link_clk_div = 10;
+	else if ((div & 0x3) == 2)
+		link_clk_div = 20;
+	else
+		pr_err("%s: unsupported div. Phy_mode: %d\n", __func__, div);
+
+	if (link_clk_div == 20) {
+		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	} else {
+		if (hsclk_div == 6)
+			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+		else if (hsclk_div == 4)
+			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+		else if (hsclk_div == 2)
+			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+		else
+			vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	dp_res->vco_cached_rate = vco->rate = vco_rate;
+	return (unsigned long)vco_rate;
+}
+
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+			unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+
+	if (rate <= vco->min_rate)
+		rrate = vco->min_rate;
+	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+	else
+		rrate = vco->max_rate;
+
+	pr_debug("%s: rrate=%ld\n", __func__, rrate);
+
+	*parent_rate = rrate;
+	return rrate;
+}
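+
+/*
+ * Sketch of how the common clock framework drives the ops above:
+ * clk_set_rate() calls dp_vco_round_rate_10nm() and then
+ * dp_vco_set_rate_10nm(); clk_prepare() calls dp_vco_prepare_10nm(),
+ * which re-applies a matching cached rate before powering up the PLL.
+ */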
+
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
new file mode 100644
index 0000000..e8f3d45
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ *		+------------------------------+
+ *		|         DP_VCO_CLK           |
+ *		|                              |
+ *		|    +-------------------+     |
+ *		|    |   (DP PLL/VCO)    |     |
+ *		|    +---------+---------+     |
+ *		|              v               |
+ *		|   +----------+-----------+   |
+ *		|   | hsclk_divsel_clk_src |   |
+ *		|   +----------+-----------+   |
+ *		+------------------------------+
+ *				|
+ *	 +------------<---------v------------>----------+
+ *	 |                                              |
+ * +-----v------------+                                 |
+ * | dp_link_clk_src  |                                 |
+ * |    divsel_ten    |                                 |
+ * +---------+--------+                                 |
+ *	|                                               |
+ *	|                                               |
+ *	v                                               v
+ * Input to DISPCC block                                |
+ * for link clk, crypto clk                             |
+ * and interface clock                                  |
+ *							|
+ *							|
+ *	+--------<------------+-----------------+---<---+
+ *	|                     |                 |
+ * +-------v------+  +--------v-----+  +--------v------+
+ * | vco_divided  |  | vco_divided  |  | vco_divided   |
+ * |    _clk_src  |  |    _clk_src  |  |    _clk_src   |
+ * |              |  |              |  |               |
+ * |divsel_six    |  |  divsel_two  |  |  divsel_four  |
+ * +-------+------+  +-----+--------+  +--------+------+
+ *         |	           |		        |
+ *	v------->----------v-------------<------v
+ *                         |
+ *		+----------+---------+
+ *		|   vco_divided_clk  |
+ *		|       _src_mux     |
+ *		+---------+----------+
+ *                        |
+ *                        v
+ *              Input to DISPCC block
+ *              for DP pixel clock
+ *
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-10nm.h"
+
+static struct dp_pll_db dp_pdb;
+static struct clk_ops mux_clk_ops;
+
+static struct regmap_config dp_pll_10nm_cfg = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register = 0x910,
+};
+
+static struct regmap_bus dp_pixel_mux_regmap_ops = {
+	.reg_write = dp_mux_set_parent_10nm,
+	.reg_read = dp_mux_get_parent_10nm,
+};
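+
+/*
+ * Note: this regmap "bus" does no raw MMIO. Its reg_write()/reg_read()
+ * hooks translate the clock framework's mux parent index to the
+ * DP_PHY_VCO_DIV encoding and back, via dp_mux_set_parent_10nm() and
+ * dp_mux_get_parent_10nm().
+ */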
+
+/* Op structures */
+static const struct clk_ops dp_10nm_vco_clk_ops = {
+	.recalc_rate = dp_vco_recalc_rate_10nm,
+	.set_rate = dp_vco_set_rate_10nm,
+	.round_rate = dp_vco_round_rate_10nm,
+	.prepare = dp_vco_prepare_10nm,
+	.unprepare = dp_vco_unprepare_10nm,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+	.max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_clk",
+		.parent_names = (const char *[]){ "xo_board" },
+		.num_parents = 1,
+		.ops = &dp_10nm_vco_clk_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_link_clk_divsel_ten = {
+	.div = 10,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_link_clk_divsel_ten",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_two_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
+	.div = 4,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_four_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
+	.div = 6,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_six_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+
+static int clk_mux_determine_rate(struct clk_hw *hw,
+				     struct clk_rate_request *req)
+{
+	int ret = 0;
+
+	ret = __clk_mux_determine_rate_closest(hw, req);
+	if (ret)
+		return ret;
+
+	/* Set the new parent of mux if there is a new valid parent */
+	if (hw->clk && req->best_parent_hw->clk)
+		clk_set_parent(hw->clk, req->best_parent_hw->clk);
+
+	return 0;
+}
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk *div_clk = NULL, *vco_clk = NULL;
+	struct dp_pll_vco_clk *vco = NULL;
+
+	div_clk = clk_get_parent(hw->clk);
+	if (!div_clk)
+		return 0;
+
+	vco_clk = clk_get_parent(div_clk);
+	if (!vco_clk)
+		return 0;
+
+	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
+	if (!vco)
+		return 0;
+
+	if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
+		return (vco->rate / 6);
+	else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		return (vco->rate / 4);
+	else
+		return (vco->rate / 2);
+}
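+
+/*
+ * With the rates used by this driver (kHz units), the divided pixel path
+ * comes out to 1350000 for all HBR rates: 8100000/6 = 5400000/4 =
+ * 2700000/2 = 1350000; only RBR differs, with 1620000/2 = 810000.
+ */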
+
+static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
+	.reg = 0x64,
+	.shift = 0,
+	.width = 2,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dp_vco_divided_clk_src_mux",
+			.parent_names =
+				(const char *[]){"dp_vco_divsel_two_clk_src",
+					"dp_vco_divsel_four_clk_src",
+					"dp_vco_divsel_six_clk_src"},
+			.num_parents = 3,
+			.ops = &mux_clk_ops,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		},
+	},
+};
+
+static struct clk_hw *mdss_dp_pllcc_10nm[] = {
+	[DP_VCO_CLK] = &dp_vco_clk.hw,
+	[DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
+	[DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
+	[DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
+	[DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
+	[DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
+};
+
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP, i = 0;
+	struct clk_onecell_data *clk_data;
+	struct clk *clk;
+	struct regmap *regmap;
+	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_10nm);
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+				sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	pll_res->priv = &dp_pdb;
+	dp_pdb.pll = pll_res;
+
+	/* Set client data for vco, mux and div clocks */
+	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
+			pll_res, &dp_pll_10nm_cfg);
+	dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
+	mux_clk_ops = clk_regmap_mux_closest_ops;
+	mux_clk_ops.determine_rate = clk_mux_determine_rate;
+	mux_clk_ops.recalc_rate = mux_recalc_rate;
+
+	dp_vco_clk.priv = pll_res;
+
+	for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
+		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+		clk = devm_clk_register(&pdev->dev,
+				mdss_dp_pllcc_10nm[i]);
+		if (IS_ERR(clk)) {
+			pr_err("clk registration failed for DP: %d\n",
+					pll_res->index);
+			rc = -EINVAL;
+			goto clk_reg_fail;
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node,
+			of_clk_src_onecell_get, clk_data);
+	if (rc) {
+		pr_err("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		pr_debug("%s SUCCESS\n", __func__);
+	}
+	return rc;
+clk_reg_fail:
+	return rc;
+}
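+
+/*
+ * Consumers pick these clocks up through the one-cell OF provider
+ * registered above; a minimal sketch (the node label is hypothetical):
+ *
+ *	clocks = <&mdss_dp_pll DP_VCO_CLK>,
+ *		 <&mdss_dp_pll DP_LINK_CLK_DIVSEL_TEN>;
+ */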
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
new file mode 100644
index 0000000..6260a49
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-10nm.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_DP_PLL_10NM_H
+#define __MDSS_DP_PLL_10NM_H
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
+#define DP_VCO_HSCLK_RATE_8100MHZDIV1000	8100000UL
+
+struct dp_pll_db {
+	struct mdss_pll_resources *pll;
+
+	/* lane and orientation settings */
+	u8 lane_cnt;
+	u8 orientation;
+
+	/* COM PHY settings */
+	u32 hsclk_sel;
+	u32 dec_start_mode0;
+	u32 div_frac_start1_mode0;
+	u32 div_frac_start2_mode0;
+	u32 div_frac_start3_mode0;
+	u32 integloop_gain0_mode0;
+	u32 integloop_gain1_mode0;
+	u32 vco_tune_map;
+	u32 lock_cmp1_mode0;
+	u32 lock_cmp2_mode0;
+	u32 lock_cmp3_mode0;
+	u32 lock_cmp_en;
+
+	/* PHY vco divider */
+	u32 phy_vco_div;
+};
+
+int dp_vco_set_rate_10nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate);
+unsigned long dp_vco_recalc_rate_10nm(struct clk_hw *hw,
+				unsigned long parent_rate);
+long dp_vco_round_rate_10nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate);
+int dp_vco_prepare_10nm(struct clk_hw *hw);
+void dp_vco_unprepare_10nm(struct clk_hw *hw);
+int dp_mux_set_parent_10nm(void *context,
+				unsigned int reg, unsigned int val);
+int dp_mux_get_parent_10nm(void *context,
+				unsigned int reg, unsigned int *val);
+#endif /* __MDSS_DP_PLL_10NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.c
new file mode 100644
index 0000000..9c73bf6
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.c
@@ -0,0 +1,867 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ ***************************************************************************
+ ******** Display Port PLL driver block diagram for branch clocks **********
+ ***************************************************************************
+
+			+--------------------------+
+			|       DP_VCO_CLK         |
+			|			   |
+			|  +-------------------+   |
+			|  |   (DP PLL/VCO)    |   |
+			|  +---------+---------+   |
+			|	     v		   |
+			| +----------+-----------+ |
+			| | hsclk_divsel_clk_src | |
+			| +----------+-----------+ |
+			+--------------------------+
+				     |
+				     v
+	   +------------<------------|------------>-------------+
+	   |                         |                          |
++----------v----------+	  +----------v----------+    +----------v----------+
+|   dp_link_2x_clk    |	  | vco_divided_clk_src	|    | vco_divided_clk_src |
+|     divsel_five     |	  |			|    |			   |
++----------+----------+	  |	divsel_two	|    |	   divsel_four	   |
+	   |		  +----------+----------+    +----------+----------+
+	   |                         |                          |
+	   v			     v				v
+				     |	+---------------------+	|
+  Input to MMSSCC block		     |	|    (aux_clk_ops)    |	|
+  for link clk, crypto clk	     +-->   vco_divided_clk   <-+
+  and interface clock			|	_src_mux      |
+					+----------+----------+
+						   |
+						   v
+					 Input to MMSSCC block
+					 for DP pixel clock
+
+ ******************************************************************************
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/usb/usbpd.h>
+
+#include <dt-bindings/clock/mdss-14nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-14nm.h"
+
+static struct dp_pll_db dp_pdb;
+static struct clk_ops mux_clk_ops;
+
+static struct regmap_config dp_pll_14nm_cfg = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register = 0x910,
+};
+
+static struct regmap_bus dp_pixel_mux_regmap_ops = {
+	.reg_write = dp_mux_set_parent_14nm,
+	.reg_read = dp_mux_get_parent_14nm,
+};
+
+/* Op structures */
+static const struct clk_ops dp_14nm_vco_clk_ops = {
+	.recalc_rate = dp_vco_recalc_rate_14nm,
+	.set_rate = dp_vco_set_rate_14nm,
+	.round_rate = dp_vco_round_rate_14nm,
+	.prepare = dp_vco_prepare_14nm,
+	.unprepare = dp_vco_unprepare_14nm,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+	.max_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_clk",
+		.parent_names = (const char *[]){ "xo_board" },
+		.num_parents = 1,
+		.ops = &dp_14nm_vco_clk_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_phy_pll_link_clk = {
+	.div = 5,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_phy_pll_link_clk",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_two_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
+	.div = 4,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_four_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static int clk_mux_determine_rate(struct clk_hw *hw,
+				     struct clk_rate_request *req)
+{
+	int ret = 0;
+
+	ret = __clk_mux_determine_rate_closest(hw, req);
+	if (ret)
+		return ret;
+
+	/* Set the new parent of mux if there is a new valid parent */
+	if (hw->clk && req->best_parent_hw->clk)
+		clk_set_parent(hw->clk, req->best_parent_hw->clk);
+
+	return 0;
+}
+
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk *div_clk = NULL, *vco_clk = NULL;
+	struct dp_pll_vco_clk *vco = NULL;
+
+	div_clk = clk_get_parent(hw->clk);
+	if (!div_clk)
+		return 0;
+
+	vco_clk = clk_get_parent(div_clk);
+	if (!vco_clk)
+		return 0;
+
+	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
+	if (!vco)
+		return 0;
+
+	if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		return (vco->rate / 4);
+	else
+		return (vco->rate / 2);
+}
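+
+/*
+ * As in the 10nm driver, the divided pixel path works out to
+ * 5400000/4 = 2700000/2 = 1350000 (kHz units) for HBR rates and
+ * 1620000/2 = 810000 for RBR.
+ */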
+
+static struct clk_regmap_mux dp_phy_pll_vco_div_clk = {
+	.reg = 0x64,
+	.shift = 0,
+	.width = 1,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dp_phy_pll_vco_div_clk",
+			.parent_names =
+				(const char *[]){"dp_vco_divsel_two_clk_src",
+					"dp_vco_divsel_four_clk_src"},
+			.num_parents = 2,
+			.ops = &mux_clk_ops,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		},
+	},
+};
+
+static struct clk_hw *mdss_dp_pllcc_14nm[] = {
+	[DP_VCO_CLK] = &dp_vco_clk.hw,
+	[DP_PHY_PLL_LINK_CLK] = &dp_phy_pll_link_clk.hw,
+	[DP_VCO_DIVSEL_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
+	[DP_VCO_DIVSEL_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
+	[DP_PHY_PLL_VCO_DIV_CLK] = &dp_phy_pll_vco_div_clk.clkr.hw,
+};
+
+
+int dp_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val)
+{
+	struct mdss_pll_resources *dp_res = context;
+	int rc;
+	u32 auxclk_div;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= ~0x03;	/* bits 0 to 1 */
+
+	if (val == 0) /* mux parent index = 0 */
+		auxclk_div |= 1;
+	else if (val == 1) /* mux parent index = 1 */
+		auxclk_div |= 2;
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_VCO_DIV, auxclk_div);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	pr_debug("mux=%d auxclk_div=%x\n", val, auxclk_div);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	return 0;
+}
+
+int dp_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val)
+{
+	int rc;
+	u32 auxclk_div = 0;
+	struct mdss_pll_resources *dp_res = context;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable dp_res resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= 0x03;
+
+	if (auxclk_div == 1) /* Default divider */
+		*val = 0;
+	else if (auxclk_div == 2)
+		*val = 1;
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	pr_debug("auxclk_div=%d, val=%d\n", auxclk_div, *val);
+
+	return 0;
+}
+
+static int dp_vco_pll_init_db_14nm(struct dp_pll_db *pdb,
+		unsigned long rate)
+{
+	struct mdss_pll_resources *dp_res = pdb->pll;
+	u32 spare_value = 0;
+
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	pdb->lane_cnt = spare_value & 0x0F;
+	pdb->orientation = (spare_value & 0xF0) >> 4;
+
+	pr_debug("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+			spare_value, pdb->lane_cnt, pdb->orientation);
+
+	switch (rate) {
+	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+		pdb->hsclk_sel = 0x2c;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->lock_cmp1_mode0 = 0xbf;
+		pdb->lock_cmp2_mode0 = 0x21;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		break;
+	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+		pdb->hsclk_sel = 0x24;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->lock_cmp1_mode0 = 0x3f;
+		pdb->lock_cmp2_mode0 = 0x38;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x1;
+		break;
+	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+		pdb->hsclk_sel = 0x20;
+		pdb->dec_start_mode0 = 0x8c;
+		pdb->div_frac_start1_mode0 = 0x00;
+		pdb->div_frac_start2_mode0 = 0x00;
+		pdb->div_frac_start3_mode0 = 0x0a;
+		pdb->lock_cmp1_mode0 = 0x7f;
+		pdb->lock_cmp2_mode0 = 0x70;
+		pdb->lock_cmp3_mode0 = 0x00;
+		pdb->phy_vco_div = 0x2;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
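+
+/*
+ * The phy_vco_div values above use the same DP_PHY_VCO_DIV encoding as
+ * dp_mux_set_parent_14nm(): 0x1 selects the /2 path (mux parent 0) and
+ * 0x2 the /4 path (mux parent 1), consistent with the /4 divider that
+ * mux_recalc_rate() assumes for DP_VCO_HSCLK_RATE_5400MHZDIV1000.
+ */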
+
+int dp_config_vco_rate_14nm(struct dp_pll_vco_clk *vco,
+		unsigned long rate)
+{
+	u32 res = 0;
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+
+	res = dp_vco_pll_init_db_14nm(pdb, rate);
+	if (res) {
+		pr_err("VCO Init DB failed\n");
+		return res;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC2)
+			MDSS_PLL_REG_W(dp_res->phy_base,
+				DP_PHY_PD_CTL, 0x2d);
+		else
+			MDSS_PLL_REG_W(dp_res->phy_base,
+				DP_PHY_PD_CTL, 0x35);
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			DP_PHY_PD_CTL, 0x3d);
+	}
+
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_SVS_MODE_CLK_SEL, 0x01);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CLK_SELECT, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_SYS_CLK_CTRL, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3f);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CLK_ENABLE1, 0x0e);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_BG_CTRL, 0x0f);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CLK_SELECT, 0x30);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_PLL_IVCO, 0x0f);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_PLL_CCTRL_MODE0, 0x28);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CP_CTRL_MODE0, 0x0b);
+
+	/* Parameters dependent on vco clock frequency */
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP3_MODE0, pdb->lock_cmp3_mode0);
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x40);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_BG_TIMER, 0x08);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CORECLK_DIV, 0x05);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE1_MODE0, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE2_MODE0, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+	wmb(); /* make sure write happens */
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CORE_CLK_EN, 0x0f);
+	wmb(); /* make sure write happens */
+
+	if (pdb->orientation == ORIENTATION_CC2)
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0xc8);
+	else
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0xd8);
+	wmb(); /* make sure write happens */
+
+	/* TX Lane configuration */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+	/* TX-0 register configuration */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TX_EMP_POST1_LVL, 0x2b);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TX_DRV_LVL, 0x2f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TX_BAND, 0x4);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX, 0x12);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX, 0x12);
+
+	/* TX-1 register configuration */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_INTERFACE_SELECT, 0x3d);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TX_EMP_POST1_LVL, 0x2b);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TX_DRV_LVL, 0x2f);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TX_BAND, 0x4);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_TX, 0x12);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_RES_CODE_LANE_OFFSET_RX, 0x12);
+	wmb(); /* make sure write happens */
+
+	/* PHY VCO divider programming */
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		DP_PHY_VCO_DIV, pdb->phy_vco_div);
+	wmb(); /* make sure write happens */
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CMN_CONFIG, 0x02);
+	wmb(); /* make sure write happens */
+
+	return res;
+}
+
+static bool dp_14nm_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL lock status */
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PLL_POLL_SLEEP_US,
+			DP_PLL_POLL_TIMEOUT_US)) {
+		pr_err("C_READY status is not high. Status=%x\n", status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static bool dp_14nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool phy_ready = true;
+
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & (BIT(1) | BIT(0))) > 0),
+			DP_PHY_POLL_SLEEP_US,
+			DP_PHY_POLL_TIMEOUT_US)) {
+		pr_err("Phy_ready is not high. Status=%x\n", status);
+		phy_ready = false;
+	}
+
+	return phy_ready;
+}
+
+static int dp_pll_enable_14nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	u32 bias_en, drvr_en;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db *pdb = (struct dp_pll_db *)dp_res->priv;
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
+	wmb(); /* Make sure the PHY register writes are done */
+
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_RESETSM_CNTRL, 0x20);
+	wmb();	/* Make sure the PLL register writes are done */
+
+	udelay(900); /* hw recommended delay for full PU */
+
+	if (!dp_14nm_pll_lock_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+	wmb();	/* Make sure the PHY register writes are done */
+
+	udelay(10); /* hw recommended delay */
+
+	if (!dp_14nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	pr_debug("PLL is locked\n");
+
+	if (pdb->lane_cnt == 1) {
+		bias_en = 0x3e;
+		drvr_en = 0x13;
+	} else {
+		bias_en = 0x3f;
+		drvr_en = 0x10;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC1) {
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN, bias_en);
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN, drvr_en);
+		} else {
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN, bias_en);
+			MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN, drvr_en);
+		}
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		MDSS_PLL_REG_W(dp_res->phy_base,
+			QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN, drvr_en);
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX0_OFFSET + TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base,
+		QSERDES_TX1_OFFSET + TXn_TX_POL_INV, 0x0a);
+
+	/*
+	 * Switch DP Mainlink clock (cc_dpphy_link_clk) from DP
+	 * controller side with final frequency
+	 */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
+	wmb();	/* Make sure the PHY register writes are done */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+	wmb();	/* Make sure the PHY register writes are done */
+
+lock_err:
+	return rc;
+}
+
+static int dp_pll_disable_14nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* Assert DP PHY power down */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
+	/*
+	 * Make sure all the register writes to disable PLL are
+	 * completed before doing any other operation
+	 */
+	wmb();
+
+	return 0;
+}
+
+
+int dp_vco_prepare_14nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	pr_debug("rate=%ld\n", vco->rate);
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll resources\n");
+		goto error;
+	}
+
+	if ((dp_res->vco_cached_rate != 0)
+		&& (dp_res->vco_cached_rate == vco->rate)) {
+		rc = vco->hw.init->ops->set_rate(hw,
+			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+				rc, dp_res->index);
+			mdss_pll_resource_enable(dp_res, false);
+			goto error;
+		}
+	}
+
+	rc = dp_pll_enable_14nm(hw);
+	if (rc) {
+		mdss_pll_resource_enable(dp_res, false);
+		pr_err("ndx=%d failed to enable dp pll\n",
+					dp_res->index);
+		goto error;
+	}
+
+	mdss_pll_resource_enable(dp_res, false);
+error:
+	return rc;
+}
+
+void dp_vco_unprepare_14nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	if (!dp_res) {
+		pr_err("Invalid input parameter\n");
+		return;
+	}
+
+	if (!dp_res->pll_on &&
+		mdss_pll_resource_enable(dp_res, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return;
+	}
+	dp_res->vco_cached_rate = vco->rate;
+	dp_pll_disable_14nm(hw);
+
+	dp_res->handoff_resources = false;
+	mdss_pll_resource_enable(dp_res, false);
+	dp_res->pll_on = false;
+}
+
+int dp_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	int rc;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	pr_debug("DP lane CLK rate=%ld\n", rate);
+
+	rc = dp_config_vco_rate_14nm(vco, rate);
+	if (rc)
+		pr_err("Failed to set clk rate\n");
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	vco->rate = rate;
+
+	return 0;
+}
+
+unsigned long dp_vco_recalc_rate_14nm(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	int rc;
+	u32 div, hsclk_div, link2xclk_div = 0;
+	u64 vco_rate;
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	if (is_gdsc_disabled(dp_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
+		return 0;
+	}
+
+	div = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+	div &= 0x0f;
+
+	if (div == 12) {
+		hsclk_div = 5; /* Default */
+	} else if (div == 4) {
+		hsclk_div = 3;
+	} else if (div == 0) {
+		hsclk_div = 2;
+	} else {
+		pr_debug("unknown divider. forcing to default\n");
+		hsclk_div = 5;
+	}
+
+	div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_MODE);
+
+	if (div & 0xd8)
+		pr_err("DP PAR Rate not correct\n");
+
+	if ((div & 0x3) == 1)
+		link2xclk_div = 10;
+	else if ((div & 0x3) == 0)
+		link2xclk_div = 5;
+	else
+		pr_err("unsupported div. Phy_mode: %d\n", div);
+
+	if (link2xclk_div == 10) {
+		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	} else {
+		if (hsclk_div == 5)
+			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+		else if (hsclk_div == 3)
+			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+		else
+			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	dp_res->vco_cached_rate = vco->rate = vco_rate;
+	return (unsigned long)vco_rate;
+}
+
+long dp_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
+			unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+
+	if (rate <= vco->min_rate)
+		rrate = vco->min_rate;
+	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	else
+		rrate = vco->max_rate;
+
+	pr_debug("rrate=%ld\n", rrate);
+
+	*parent_rate = rrate;
+	return rrate;
+}
+
+int dp_pll_clock_register_14nm(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP, i = 0;
+	struct clk_onecell_data *clk_data;
+	struct clk *clk;
+	struct regmap *regmap;
+	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_14nm);
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+				sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	pll_res->priv = &dp_pdb;
+	dp_pdb.pll = pll_res;
+
+	/* Set client data for vco, mux and div clocks */
+	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
+			pll_res, &dp_pll_14nm_cfg);
+	dp_phy_pll_vco_div_clk.clkr.regmap = regmap;
+	mux_clk_ops = clk_regmap_mux_closest_ops;
+	mux_clk_ops.determine_rate = clk_mux_determine_rate;
+	mux_clk_ops.recalc_rate = mux_recalc_rate;
+
+	dp_vco_clk.priv = pll_res;
+
+	for (i = DP_VCO_CLK; i <= DP_PHY_PLL_VCO_DIV_CLK; i++) {
+		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+		clk = devm_clk_register(&pdev->dev,
+				mdss_dp_pllcc_14nm[i]);
+		if (IS_ERR(clk)) {
+			pr_err("clk registration failed for DP: %d\n",
+					pll_res->index);
+			rc = -EINVAL;
+			goto clk_reg_fail;
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node,
+			of_clk_src_onecell_get, clk_data);
+	if (rc) {
+		pr_err("Clock register failed rc=%d\n", rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		pr_debug("SUCCESS\n");
+	}
+	return rc;
+clk_reg_fail:
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.h
new file mode 100644
index 0000000..4308fd6
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-14nm.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_DP_PLL_14NM_H
+#define __MDSS_DP_PLL_14NM_H
+
+#define DP_PHY_REVISION_ID0			0x0000
+#define DP_PHY_REVISION_ID1			0x0004
+#define DP_PHY_REVISION_ID2			0x0008
+#define DP_PHY_REVISION_ID3			0x000C
+
+#define DP_PHY_CFG				0x0010
+#define DP_PHY_CFG_1				0x0014
+#define DP_PHY_PD_CTL				0x0018
+#define DP_PHY_MODE				0x001C
+
+#define DP_PHY_AUX_CFG0				0x0020
+#define DP_PHY_AUX_CFG1				0x0024
+#define DP_PHY_AUX_CFG2				0x0028
+#define DP_PHY_AUX_CFG3				0x002C
+#define DP_PHY_AUX_CFG4				0x0030
+#define DP_PHY_AUX_CFG5				0x0034
+#define DP_PHY_AUX_CFG6				0x0038
+#define DP_PHY_AUX_CFG7				0x003C
+#define DP_PHY_AUX_CFG8				0x0040
+#define DP_PHY_AUX_CFG9				0x0044
+#define DP_PHY_AUX_INTERRUPT_MASK		0x0048
+#define DP_PHY_AUX_INTERRUPT_CLEAR		0x004C
+#define DP_PHY_AUX_BIST_CFG			0x0050
+
+#define DP_PHY_VCO_DIV				0x0068
+#define DP_PHY_TX0_TX1_LANE_CTL			0x006C
+
+#define DP_PHY_TX2_TX3_LANE_CTL			0x0088
+#define DP_PHY_SPARE0				0x00AC
+#define DP_PHY_STATUS				0x00C0
+
+/* Tx registers */
+#define QSERDES_TX0_OFFSET			0x0400
+#define QSERDES_TX1_OFFSET			0x0800
+
+#define TXn_BIST_MODE_LANENO			0x0000
+#define TXn_CLKBUF_ENABLE			0x0008
+#define TXn_TX_EMP_POST1_LVL			0x000C
+
+#define TXn_TX_DRV_LVL				0x001C
+
+#define TXn_RESET_TSYNC_EN			0x0024
+#define TXn_PRE_STALL_LDO_BOOST_EN		0x0028
+#define TXn_TX_BAND				0x002C
+#define TXn_SLEW_CNTL				0x0030
+#define TXn_INTERFACE_SELECT			0x0034
+
+#define TXn_RES_CODE_LANE_TX			0x003C
+#define TXn_RES_CODE_LANE_RX			0x0040
+#define TXn_RES_CODE_LANE_OFFSET_TX		0x0044
+#define TXn_RES_CODE_LANE_OFFSET_RX		0x0048
+
+#define TXn_DEBUG_BUS_SEL			0x0058
+#define TXn_TRANSCEIVER_BIAS_EN			0x005C
+#define TXn_HIGHZ_DRVR_EN			0x0060
+#define TXn_TX_POL_INV				0x0064
+#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0068
+
+#define TXn_LANE_MODE_1				0x008C
+
+#define TXn_TRAN_DRVR_EMP_EN			0x00C0
+#define TXn_TX_INTERFACE_MODE			0x00C4
+
+#define TXn_VMODE_CTRL1				0x00F0
+
+
+/* PLL register offset */
+#define QSERDES_COM_ATB_SEL1			0x0000
+#define QSERDES_COM_ATB_SEL2			0x0004
+#define QSERDES_COM_FREQ_UPDATE			0x0008
+#define QSERDES_COM_BG_TIMER			0x000C
+#define QSERDES_COM_SSC_EN_CENTER		0x0010
+#define QSERDES_COM_SSC_ADJ_PER1		0x0014
+#define QSERDES_COM_SSC_ADJ_PER2		0x0018
+#define QSERDES_COM_SSC_PER1			0x001C
+#define QSERDES_COM_SSC_PER2			0x0020
+#define QSERDES_COM_SSC_STEP_SIZE1		0x0024
+#define QSERDES_COM_SSC_STEP_SIZE2		0x0028
+#define QSERDES_COM_POST_DIV			0x002C
+#define QSERDES_COM_POST_DIV_MUX		0x0030
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0034
+#define QSERDES_COM_CLK_ENABLE1			0x0038
+#define QSERDES_COM_SYS_CLK_CTRL		0x003C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0040
+#define QSERDES_COM_PLL_EN			0x0044
+#define QSERDES_COM_PLL_IVCO			0x0048
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x004C
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x0050
+#define QSERDES_COM_LOCK_CMP3_MODE0		0x0054
+
+#define QSERDES_COM_CP_CTRL_MODE0		0x0078
+#define QSERDES_COM_CP_CTRL_MODE1		0x007C
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x0084
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x0090
+#define QSERDES_COM_PLL_CNTRL			0x009C
+
+#define QSERDES_COM_SYSCLK_EN_SEL		0x00AC
+#define QSERDES_COM_CML_SYSCLK_SEL		0x00B0
+#define QSERDES_COM_RESETSM_CNTRL		0x00B4
+#define QSERDES_COM_RESETSM_CNTRL2		0x00B8
+#define QSERDES_COM_LOCK_CMP_EN			0x00C8
+#define QSERDES_COM_LOCK_CMP_CFG		0x00CC
+
+
+#define QSERDES_COM_DEC_START_MODE0		0x00D0
+#define QSERDES_COM_DEC_START_MODE1		0x00D4
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00DC
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00E0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00E4
+
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x0108
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x010C
+#define QSERDES_COM_VCO_TUNE_CTRL		0x0124
+#define QSERDES_COM_VCO_TUNE_MAP		0x0128
+#define QSERDES_COM_VCO_TUNE1_MODE0		0x012C
+#define QSERDES_COM_VCO_TUNE2_MODE0		0x0130
+
+#define QSERDES_COM_CMN_STATUS			0x015C
+#define QSERDES_COM_RESET_SM_STATUS		0x0160
+
+#define QSERDES_COM_BG_CTRL			0x0170
+#define QSERDES_COM_CLK_SELECT			0x0174
+#define QSERDES_COM_HSCLK_SEL			0x0178
+#define QSERDES_COM_CORECLK_DIV			0x0184
+#define QSERDES_COM_SW_RESET			0x0188
+#define QSERDES_COM_CORE_CLK_EN			0x018C
+#define QSERDES_COM_C_READY_STATUS		0x0190
+#define QSERDES_COM_CMN_CONFIG			0x0194
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x019C
+
+#define DP_PLL_POLL_SLEEP_US			500
+#define DP_PLL_POLL_TIMEOUT_US			10000
+
+#define DP_PHY_POLL_SLEEP_US			500
+#define DP_PHY_POLL_TIMEOUT_US			10000
+
+#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
+#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
+
+struct dp_pll_db {
+	struct mdss_pll_resources *pll;
+
+	/* lane and orientation settings */
+	u8 lane_cnt;
+	u8 orientation;
+
+	/* COM PHY settings */
+	u32 hsclk_sel;
+	u32 dec_start_mode0;
+	u32 div_frac_start1_mode0;
+	u32 div_frac_start2_mode0;
+	u32 div_frac_start3_mode0;
+	u32 lock_cmp1_mode0;
+	u32 lock_cmp2_mode0;
+	u32 lock_cmp3_mode0;
+
+	/* PHY vco divider */
+	u32 phy_vco_div;
+};
+
+int dp_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate);
+unsigned long dp_vco_recalc_rate_14nm(struct clk_hw *hw,
+				unsigned long parent_rate);
+long dp_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate);
+int dp_vco_prepare_14nm(struct clk_hw *hw);
+void dp_vco_unprepare_14nm(struct clk_hw *hw);
+int dp_mux_set_parent_14nm(void *context,
+				unsigned int reg, unsigned int val);
+int dp_mux_get_parent_14nm(void *context,
+				unsigned int reg, unsigned int *val);
+#endif /* __MDSS_DP_PLL_14NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
new file mode 100644
index 0000000..8594e6f
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm-util.c
@@ -0,0 +1,725 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"[dp-pll] %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/usb/usbpd.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-7nm.h"
+
+#define DP_PHY_CFG				0x0010
+#define DP_PHY_PD_CTL				0x0018
+#define DP_PHY_MODE				0x001C
+
+#define DP_PHY_AUX_CFG2				0x0028
+
+#define DP_PHY_VCO_DIV				0x0070
+#define DP_PHY_TX0_TX1_LANE_CTL			0x0078
+#define DP_PHY_TX2_TX3_LANE_CTL			0x009C
+
+#define DP_PHY_SPARE0				0x00C8
+#define DP_PHY_STATUS				0x00DC
+
+/* Tx registers */
+#define TXn_CLKBUF_ENABLE			0x0008
+#define TXn_TX_EMP_POST1_LVL			0x000C
+
+#define TXn_TX_DRV_LVL				0x0014
+
+#define TXn_RESET_TSYNC_EN			0x001C
+#define TXn_PRE_STALL_LDO_BOOST_EN		0x0020
+#define TXn_TX_BAND				0x0024
+#define TXn_INTERFACE_SELECT			0x002C
+
+#define TXn_RES_CODE_LANE_OFFSET_TX		0x003C
+#define TXn_RES_CODE_LANE_OFFSET_RX		0x0040
+
+#define TXn_TRANSCEIVER_BIAS_EN			0x0054
+#define TXn_HIGHZ_DRVR_EN			0x0058
+#define TXn_TX_POL_INV				0x005C
+#define TXn_PARRATE_REC_DETECT_IDLE_EN		0x0060
+
+#define TXn_TRAN_DRVR_EMP_EN			0x00B8
+#define TXn_TX_INTERFACE_MODE			0x00BC
+
+#define TXn_VMODE_CTRL1				0x00E8
+
+/* PLL register offset */
+#define QSERDES_COM_BG_TIMER			0x000C
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		0x0044
+#define QSERDES_COM_CLK_ENABLE1			0x0048
+#define QSERDES_COM_SYS_CLK_CTRL		0x004C
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		0x0050
+#define QSERDES_COM_PLL_IVCO			0x0058
+
+#define QSERDES_COM_CP_CTRL_MODE0		0x0074
+#define QSERDES_COM_PLL_RCTRL_MODE0		0x007C
+#define QSERDES_COM_PLL_CCTRL_MODE0		0x0084
+#define QSERDES_COM_SYSCLK_EN_SEL		0x0094
+#define QSERDES_COM_RESETSM_CNTRL		0x009C
+#define QSERDES_COM_LOCK_CMP_EN			0x00A4
+#define QSERDES_COM_LOCK_CMP1_MODE0		0x00AC
+#define QSERDES_COM_LOCK_CMP2_MODE0		0x00B0
+
+#define QSERDES_COM_DEC_START_MODE0		0x00BC
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	0x00CC
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	0x00D0
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	0x00D4
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	0x00EC
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	0x00F0
+#define QSERDES_COM_VCO_TUNE_CTRL		0x0108
+#define QSERDES_COM_VCO_TUNE_MAP		0x010C
+
+#define QSERDES_COM_CLK_SEL			0x0154
+#define QSERDES_COM_HSCLK_SEL			0x0158
+
+#define QSERDES_COM_CORECLK_DIV_MODE0		0x0168
+
+#define QSERDES_COM_CORE_CLK_EN			0x0174
+#define QSERDES_COM_C_READY_STATUS		0x0178
+#define QSERDES_COM_CMN_CONFIG			0x017C
+
+#define QSERDES_COM_SVS_MODE_CLK_SEL		0x0184
+
+#define DP_PHY_PLL_POLL_SLEEP_US		500
+#define DP_PHY_PLL_POLL_TIMEOUT_US		10000
+
+#define DP_VCO_RATE_8100MHZDIV1000		8100000UL
+#define DP_VCO_RATE_9720MHZDIV1000		9720000UL
+#define DP_VCO_RATE_10800MHZDIV1000		10800000UL
+
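+/*
+ * Bits [1:0] of DP_PHY_VCO_DIV select the VCO post divider feeding the
+ * pixel clock: register value 1 is divide-by-2 (mux index 0), 2 is
+ * divide-by-4 (mux index 1) and 0 is divide-by-6 (mux index 2),
+ * matching the divsel_two/four/six fixed-factor clocks registered in
+ * mdss-dp-pll-7nm.c.
+ */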
+int dp_mux_set_parent_7nm(void *context, unsigned int reg, unsigned int val)
+{
+	struct mdss_pll_resources *dp_res = context;
+	int rc;
+	u32 auxclk_div;
+
+	if (!context) {
+		pr_err("invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP PLL resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= ~0x03;
+
+	if (val == 0)
+		auxclk_div |= 1;
+	else if (val == 1)
+		auxclk_div |= 2;
+	else if (val == 2)
+		auxclk_div |= 0;
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, auxclk_div);
+	/* Make sure the PHY registers writes are done */
+	wmb();
+	pr_debug("mux=%d auxclk_div=%x\n", val, auxclk_div);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	return 0;
+}
+
+int dp_mux_get_parent_7nm(void *context, unsigned int reg, unsigned int *val)
+{
+	int rc;
+	u32 auxclk_div = 0;
+	struct mdss_pll_resources *dp_res = context;
+
+	if (!context || !val) {
+		pr_err("invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable dp_res resources\n");
+		return rc;
+	}
+
+	auxclk_div = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_VCO_DIV);
+	auxclk_div &= 0x03;
+
+	if (auxclk_div == 1) /* Default divider */
+		*val = 0;
+	else if (auxclk_div == 2)
+		*val = 1;
+	else if (auxclk_div == 0)
+		*val = 2;
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	pr_debug("auxclk_div=%d, val=%d\n", auxclk_div, *val);
+
+	return 0;
+}
+
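+/*
+ * DP_PHY_SPARE0 carries the negotiated configuration, presumably
+ * written by the DP driver: lane count in bits [3:0] and plug
+ * orientation in bits [7:4]. The switch below then loads the feedback
+ * divider and lock comparator settings for the requested link rate:
+ * RBR (1.62 Gbps), HBR (2.7 Gbps), HBR2 (5.4 Gbps) or HBR3 (8.1 Gbps).
+ */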
+static int dp_vco_pll_init_db_7nm(struct dp_pll_db_7nm *pdb,
+		unsigned long rate)
+{
+	struct mdss_pll_resources *dp_res = pdb->pll;
+	u32 spare_value = 0;
+
+	spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+	pdb->lane_cnt = spare_value & 0x0F;
+	pdb->orientation = (spare_value & 0xF0) >> 4;
+
+	pr_debug("spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+			spare_value, pdb->lane_cnt, pdb->orientation);
+
+	pdb->div_frac_start1_mode0 = 0x00;
+	pdb->integloop_gain0_mode0 = 0x3f;
+	pdb->integloop_gain1_mode0 = 0x00;
+	pdb->vco_tune_map = 0x00;
+	pdb->cmn_config = 0x02;
+	pdb->txn_tran_drv_emp_en = 0xf;
+
+	switch (rate) {
+	case DP_VCO_HSCLK_RATE_1620MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_9720MHZDIV1000);
+		pdb->hsclk_sel = 0x05;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->lock_cmp1_mode0 = 0x6f;
+		pdb->lock_cmp2_mode0 = 0x08;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x04;
+		break;
+	case DP_VCO_HSCLK_RATE_2700MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x03;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->lock_cmp1_mode0 = 0x0f;
+		pdb->lock_cmp2_mode0 = 0x0e;
+		pdb->phy_vco_div = 0x1;
+		pdb->lock_cmp_en = 0x08;
+		break;
+	case DP_VCO_HSCLK_RATE_5400MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_10800MHZDIV1000);
+		pdb->hsclk_sel = 0x01;
+		pdb->dec_start_mode0 = 0x8c;
+		pdb->div_frac_start2_mode0 = 0x00;
+		pdb->div_frac_start3_mode0 = 0x0a;
+		pdb->lock_cmp1_mode0 = 0x1f;
+		pdb->lock_cmp2_mode0 = 0x1c;
+		pdb->phy_vco_div = 0x2;
+		pdb->lock_cmp_en = 0x08;
+		break;
+	case DP_VCO_HSCLK_RATE_8100MHZDIV1000:
+		pr_debug("VCO rate: %ld\n", DP_VCO_RATE_8100MHZDIV1000);
+		pdb->hsclk_sel = 0x00;
+		pdb->dec_start_mode0 = 0x69;
+		pdb->div_frac_start2_mode0 = 0x80;
+		pdb->div_frac_start3_mode0 = 0x07;
+		pdb->lock_cmp1_mode0 = 0x2f;
+		pdb->lock_cmp2_mode0 = 0x2a;
+		pdb->phy_vco_div = 0x0;
+		pdb->lock_cmp_en = 0x08;
+		break;
+	default:
+		pr_err("unsupported rate %ld\n", rate);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int dp_config_vco_rate_7nm(struct dp_pll_vco_clk *vco,
+		unsigned long rate)
+{
+	u32 res = 0;
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db_7nm *pdb = (struct dp_pll_db_7nm *)dp_res->priv;
+
+	res = dp_vco_pll_init_db_7nm(pdb, rate);
+	if (res) {
+		pr_err("VCO Init DB failed\n");
+		return res;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC2)
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x6d);
+		else
+			MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x75);
+	} else {
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x7d);
+	}
+
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL, 0x05);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x3b);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0c);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CLK_SEL, 0x30);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_HSCLK_SEL, pdb->hsclk_sel);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_IVCO, 0x0f);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP_EN, pdb->lock_cmp_en);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_CCTRL_MODE0, 0x36);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_PLL_RCTRL_MODE0, 0x16);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CP_CTRL_MODE0, 0x06);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DEC_START_MODE0, pdb->dec_start_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START1_MODE0, pdb->div_frac_start1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START2_MODE0, pdb->div_frac_start2_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_DIV_FRAC_START3_MODE0, pdb->div_frac_start3_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_CMN_CONFIG, pdb->cmn_config);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN0_MODE0, pdb->integloop_gain0_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_INTEGLOOP_GAIN1_MODE0, pdb->integloop_gain1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_VCO_TUNE_MAP, pdb->vco_tune_map);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP1_MODE0, pdb->lock_cmp1_mode0);
+	MDSS_PLL_REG_W(dp_res->pll_base,
+		QSERDES_COM_LOCK_CMP2_MODE0, pdb->lock_cmp2_mode0);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BG_TIMER, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORECLK_DIV_MODE0, 0x0a);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x17);
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_CORE_CLK_EN, 0x1f);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	if (pdb->orientation == ORIENTATION_CC2)
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x4c);
+	else
+		MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_MODE, 0x5c);
+	/* Make sure the PLL register writes are done */
+	wmb();
+
+	/* TX Lane configuration */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_TX0_TX1_LANE_CTL, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_TX2_TX3_LANE_CTL, 0x05);
+
+	/* TX-0 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3b);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TRAN_DRVR_EMP_EN,
+		pdb->txn_tran_drv_emp_en);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_BAND, 0x4);
+
+	/* TX-1 register configuration */
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRANSCEIVER_BIAS_EN, 0x1a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_VMODE_CTRL1, 0x40);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_PRE_STALL_LDO_BOOST_EN, 0x30);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3b);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_CLKBUF_ENABLE, 0x0f);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TRAN_DRVR_EMP_EN,
+		pdb->txn_tran_drv_emp_en);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+		TXn_PARRATE_REC_DETECT_IDLE_EN, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_BAND, 0x4);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_VCO_DIV, pdb->phy_vco_div);
+
+	return res;
+}
+
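+/*
+ * Poll bit 0 (C_READY) of QSERDES_COM_C_READY_STATUS until the PLL
+ * reports lock, delaying 500us between reads for at most 10ms.
+ */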
+static bool dp_7nm_pll_lock_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	if (readl_poll_timeout_atomic((dp_res->pll_base +
+			QSERDES_COM_C_READY_STATUS),
+			status,
+			((status & BIT(0)) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("C_READY status is not high. Status=%x\n", status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static bool dp_7nm_phy_rdy_status(struct mdss_pll_resources *dp_res)
+{
+	u32 status;
+	bool phy_ready = true;
+
+	/* poll for PHY ready status */
+	if (readl_poll_timeout_atomic((dp_res->phy_base +
+			DP_PHY_STATUS),
+			status,
+			((status & (BIT(1))) > 0),
+			DP_PHY_PLL_POLL_SLEEP_US,
+			DP_PHY_PLL_POLL_TIMEOUT_US)) {
+		pr_err("Phy_ready is not high. Status=%x\n", status);
+		phy_ready = false;
+	}
+
+	return phy_ready;
+}
+
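+/*
+ * PHY/PLL bring-up: pulse the reset and start bits in DP_PHY_CFG, kick
+ * the PLL state machine via RESETSM_CNTRL, wait for PLL lock and PHY
+ * ready, then program per-lane bias, drive level and emphasis.
+ */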
+static int dp_pll_enable_7nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+	struct dp_pll_db_7nm *pdb = (struct dp_pll_db_7nm *)dp_res->priv;
+	u32 bias_en, drvr_en;
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_AUX_CFG2, 0x24);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x05);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x01);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x09);
+	wmb(); /* Make sure the PHY register writes are done */
+
+	MDSS_PLL_REG_W(dp_res->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	wmb();	/* Make sure the PLL register writes are done */
+
+	if (!dp_7nm_pll_lock_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+	/* Make sure the PHY register writes are done */
+	wmb();
+	/* poll for PHY ready status */
+	if (!dp_7nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	pr_debug("PLL is locked\n");
+
+	if (pdb->lane_cnt == 1) {
+		bias_en = 0x3e;
+		drvr_en = 0x13;
+	} else {
+		bias_en = 0x3f;
+		drvr_en = 0x10;
+	}
+
+	if (pdb->lane_cnt != 4) {
+		if (pdb->orientation == ORIENTATION_CC1) {
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		} else {
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_HIGHZ_DRVR_EN, drvr_en);
+			MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+				TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		}
+	} else {
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx0_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_HIGHZ_DRVR_EN, drvr_en);
+		MDSS_PLL_REG_W(dp_res->ln_tx1_base,
+			TXn_TRANSCEIVER_BIAS_EN, bias_en);
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_POL_INV, 0x0a);
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x18);
+	udelay(2000);
+
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_CFG, 0x19);
+
+	/*
+	 * Make sure all the register writes are completed before
+	 * doing any other operation
+	 */
+	wmb();
+
+	/* poll for PHY ready status */
+	if (!dp_7nm_phy_rdy_status(dp_res)) {
+		rc = -EINVAL;
+		goto lock_err;
+	}
+
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_DRV_LVL, 0x3f);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_DRV_LVL, 0x3f);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_TX_EMP_POST1_LVL, 0x23);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_TX_EMP_POST1_LVL, 0x23);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x11);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_TX, 0x11);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_RES_CODE_LANE_OFFSET_RX, 0x11);
+	MDSS_PLL_REG_W(dp_res->ln_tx0_base, TXn_INTERFACE_SELECT, 0x3b);
+	MDSS_PLL_REG_W(dp_res->ln_tx1_base, TXn_INTERFACE_SELECT, 0x3b);
+	/* Make sure the PHY register writes are done */
+	wmb();
+
+lock_err:
+	return rc;
+}
+
+static int dp_pll_disable_7nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco = to_dp_vco_hw(hw);
+	struct mdss_pll_resources *dp_res = vco->priv;
+
+	/* Assert DP PHY power down */
+	MDSS_PLL_REG_W(dp_res->phy_base, DP_PHY_PD_CTL, 0x2);
+	/*
+	 * Make sure all the register writes to disable PLL are
+	 * completed before doing any other operation
+	 */
+	wmb();
+
+	return 0;
+}
+
+int dp_vco_prepare_7nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dp_pll_vco_clk *vco;
+	struct mdss_pll_resources *dp_res;
+
+	if (!hw) {
+		pr_err("invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	vco = to_dp_vco_hw(hw);
+	dp_res = vco->priv;
+
+	pr_debug("rate=%ld\n", vco->rate);
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll resources\n");
+		goto error;
+	}
+
+	if ((dp_res->vco_cached_rate != 0)
+		&& (dp_res->vco_cached_rate == vco->rate)) {
+		rc = vco->hw.init->ops->set_rate(hw,
+			dp_res->vco_cached_rate, dp_res->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+				dp_res->index, rc);
+			mdss_pll_resource_enable(dp_res, false);
+			goto error;
+		}
+	}
+
+	rc = dp_pll_enable_7nm(hw);
+	if (rc) {
+		mdss_pll_resource_enable(dp_res, false);
+		pr_err("ndx=%d failed to enable dp pll\n", dp_res->index);
+		goto error;
+	}
+
+	mdss_pll_resource_enable(dp_res, false);
+error:
+	return rc;
+}
+
+void dp_vco_unprepare_7nm(struct clk_hw *hw)
+{
+	struct dp_pll_vco_clk *vco;
+	struct mdss_pll_resources *dp_res;
+
+	if (!hw) {
+		pr_err("invalid input parameters\n");
+		return;
+	}
+
+	vco = to_dp_vco_hw(hw);
+	dp_res = vco->priv;
+
+	if (!dp_res) {
+		pr_err("invalid input parameter\n");
+		return;
+	}
+
+	if (!dp_res->pll_on &&
+		mdss_pll_resource_enable(dp_res, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return;
+	}
+	dp_res->vco_cached_rate = vco->rate;
+	dp_pll_disable_7nm(hw);
+
+	dp_res->handoff_resources = false;
+	mdss_pll_resource_enable(dp_res, false);
+	dp_res->pll_on = false;
+}
+
+int dp_vco_set_rate_7nm(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco;
+	struct mdss_pll_resources *dp_res;
+	int rc;
+
+	if (!hw) {
+		pr_err("invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	vco = to_dp_vco_hw(hw);
+	dp_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	pr_debug("DP lane CLK rate=%ld\n", rate);
+
+	rc = dp_config_vco_rate_7nm(vco, rate);
+	if (rc)
+		pr_err("Failed to set clk rate\n");
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	vco->rate = rate;
+
+	return 0;
+}
+
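+/*
+ * Recover the operating VCO rate from hardware: HSCLK_SEL encodes the
+ * VCO post divider (5, 3, 2 or 1) and bits [3:2] of DP_PHY_AUX_CFG2
+ * the link clock divider; a link divider of 20 only occurs in the
+ * 2.7 GHz configuration.
+ */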
+unsigned long dp_vco_recalc_rate_7nm(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct dp_pll_vco_clk *vco;
+	int rc;
+	u32 hsclk_sel, link_clk_divsel, hsclk_div, link_clk_div = 0;
+	unsigned long vco_rate;
+	struct mdss_pll_resources *dp_res;
+
+	if (!hw) {
+		pr_err("invalid input parameters\n");
+		return 0;
+	}
+
+	vco = to_dp_vco_hw(hw);
+	dp_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dp_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss DP pll=%d\n", dp_res->index);
+		return 0;
+	}
+
+	pr_debug("input rates: parent=%lu, vco=%lu\n", parent_rate, vco->rate);
+
+	hsclk_sel = MDSS_PLL_REG_R(dp_res->pll_base, QSERDES_COM_HSCLK_SEL);
+	hsclk_sel &= 0x0f;
+
+	if (hsclk_sel == 5) {
+		hsclk_div = 5;
+	} else if (hsclk_sel == 3) {
+		hsclk_div = 3;
+	} else if (hsclk_sel == 1) {
+		hsclk_div = 2;
+	} else if (hsclk_sel == 0) {
+		hsclk_div = 1;
+	} else {
+		pr_debug("unknown divider. forcing to default\n");
+		hsclk_div = 5;
+	}
+
+	link_clk_divsel = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_AUX_CFG2);
+	link_clk_divsel >>= 2;
+	link_clk_divsel &= 0x3;
+
+	if (link_clk_divsel == 0)
+		link_clk_div = 5;
+	else if (link_clk_divsel == 1)
+		link_clk_div = 10;
+	else if (link_clk_divsel == 2)
+		link_clk_div = 20;
+	else
+		pr_err("unsupported div. Phy_mode: %d\n", link_clk_divsel);
+
+	if (link_clk_div == 20) {
+		vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	} else {
+		if (hsclk_div == 5)
+			vco_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000;
+		else if (hsclk_div == 3)
+			vco_rate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+		else if (hsclk_div == 2)
+			vco_rate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+		else
+			vco_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000;
+	}
+
+	pr_debug("hsclk: sel=0x%x, div=0x%x; lclk: sel=%u, div=%u, rate=%lu\n",
+		hsclk_sel, hsclk_div, link_clk_divsel, link_clk_div, vco_rate);
+
+	mdss_pll_resource_enable(dp_res, false);
+
+	dp_res->vco_cached_rate = vco->rate = vco_rate;
+	return vco_rate;
+}
+
+long dp_vco_round_rate_7nm(struct clk_hw *hw, unsigned long rate,
+			unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dp_pll_vco_clk *vco;
+
+	if (!hw) {
+		pr_err("invalid input parameters\n");
+		return 0;
+	}
+
+	vco = to_dp_vco_hw(hw);
+	if (rate <= vco->min_rate)
+		rrate = vco->min_rate;
+	else if (rate <= DP_VCO_HSCLK_RATE_2700MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_2700MHZDIV1000;
+	else if (rate <= DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		rrate = DP_VCO_HSCLK_RATE_5400MHZDIV1000;
+	else
+		rrate = vco->max_rate;
+
+	pr_debug("rrate=%ld\n", rrate);
+
+	if (parent_rate)
+		*parent_rate = rrate;
+	return rrate;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.c b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.c
new file mode 100644
index 0000000..3207eca
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.c
@@ -0,0 +1,297 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Display Port PLL driver block diagram for branch clocks
+ *
+ *		+------------------------------+
+ *		|         DP_VCO_CLK           |
+ *		|                              |
+ *		|    +-------------------+     |
+ *		|    |   (DP PLL/VCO)    |     |
+ *		|    +---------+---------+     |
+ *		|              v               |
+ *		|   +----------+-----------+   |
+ *		|   | hsclk_divsel_clk_src |   |
+ *		|   +----------+-----------+   |
+ *		+------------------------------+
+ *				|
+ *	 +------------<---------v------------>----------+
+ *	 |                                              |
+ * +-----v------------+                                 |
+ * | dp_link_clk_src  |                                 |
+ * |    divsel_ten    |                                 |
+ * +---------+--------+                                 |
+ *	|                                               |
+ *	|                                               |
+ *	v                                               v
+ * Input to DISPCC block                                |
+ * for link clk, crypto clk                             |
+ * and interface clock                                  |
+ *							|
+ *							|
+ *	+--------<------------+-----------------+---<---+
+ *	|                     |                 |
+ * +-------v------+  +--------v-----+  +--------v------+
+ * | vco_divided  |  | vco_divided  |  | vco_divided   |
+ * |    _clk_src  |  |    _clk_src  |  |    _clk_src   |
+ * |              |  |              |  |               |
+ * |divsel_six    |  |  divsel_two  |  |  divsel_four  |
+ * +-------+------+  +-----+--------+  +--------+------+
+ *         |	           |		        |
+ *	v------->----------v-------------<------v
+ *                         |
+ *		+----------+---------+
+ *		|   vco_divided_clk  |
+ *		|       _src_mux     |
+ *		+---------+----------+
+ *                        |
+ *                        v
+ *              Input to DISPCC block
+ *              for DP pixel clock
+ *
+ */
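+
+/*
+ * For example, at the HBR2 link rate (5.4 Gbps) the VCO locks at
+ * 5.4 GHz: dp_link_clk_src supplies VCO/10 = 540 MHz to DISPCC and the
+ * pixel clock path selects the divide-by-4 branch of the mux.
+ */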
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dp-pll.h"
+#include "mdss-dp-pll-7nm.h"
+
+static struct dp_pll_db_7nm dp_pdb_7nm;
+static struct clk_ops mux_clk_ops;
+
+static struct regmap_config dp_pll_7nm_cfg = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register = 0x910,
+};
+
+static struct regmap_bus dp_pixel_mux_regmap_ops = {
+	.reg_write = dp_mux_set_parent_7nm,
+	.reg_read = dp_mux_get_parent_7nm,
+};
+
+/* Op structures */
+static const struct clk_ops dp_7nm_vco_clk_ops = {
+	.recalc_rate = dp_vco_recalc_rate_7nm,
+	.set_rate = dp_vco_set_rate_7nm,
+	.round_rate = dp_vco_round_rate_7nm,
+	.prepare = dp_vco_prepare_7nm,
+	.unprepare = dp_vco_unprepare_7nm,
+};
+
+static struct dp_pll_vco_clk dp_vco_clk = {
+	.min_rate = DP_VCO_HSCLK_RATE_1620MHZDIV1000,
+	.max_rate = DP_VCO_HSCLK_RATE_8100MHZDIV1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_clk",
+		.parent_names = (const char *[]){ "xo_board" },
+		.num_parents = 1,
+		.ops = &dp_7nm_vco_clk_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_link_clk_divsel_ten = {
+	.div = 10,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_link_clk_divsel_ten",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_two_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_two_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_four_clk_src = {
+	.div = 4,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_four_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dp_vco_divsel_six_clk_src = {
+	.div = 6,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dp_vco_divsel_six_clk_src",
+		.parent_names =
+			(const char *[]){ "dp_vco_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
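+/*
+ * determine_rate for the pixel clock mux: let the generic helper pick
+ * the closest parent, then reparent immediately so the new divider
+ * takes effect without a separate clk_set_parent() call.
+ */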
+static int clk_mux_determine_rate(struct clk_hw *hw,
+				  struct clk_rate_request *req)
+{
+	int ret = 0;
+
+	if (!hw || !req) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	ret = __clk_mux_determine_rate_closest(hw, req);
+	if (ret)
+		return ret;
+
+	/* Set the new parent of mux if there is a new valid parent */
+	if (hw->clk && req->best_parent_hw->clk)
+		clk_set_parent(hw->clk, req->best_parent_hw->clk);
+
+	return 0;
+}
+
+static unsigned long mux_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct clk *div_clk = NULL, *vco_clk = NULL;
+	struct dp_pll_vco_clk *vco = NULL;
+
+	if (!hw) {
+		pr_err("Invalid input parameter\n");
+		return 0;
+	}
+
+	div_clk = clk_get_parent(hw->clk);
+	if (!div_clk)
+		return 0;
+
+	vco_clk = clk_get_parent(div_clk);
+	if (!vco_clk)
+		return 0;
+
+	vco = to_dp_vco_hw(__clk_get_hw(vco_clk));
+	if (!vco)
+		return 0;
+
+	if (vco->rate == DP_VCO_HSCLK_RATE_8100MHZDIV1000)
+		return (vco->rate / 6);
+	else if (vco->rate == DP_VCO_HSCLK_RATE_5400MHZDIV1000)
+		return (vco->rate / 4);
+	else
+		return (vco->rate / 2);
+}
+
+static struct clk_regmap_mux dp_vco_divided_clk_src_mux = {
+	.reg = 0x64,
+	.shift = 0,
+	.width = 2,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dp_vco_divided_clk_src_mux",
+			.parent_names =
+				(const char *[]){"dp_vco_divsel_two_clk_src",
+					"dp_vco_divsel_four_clk_src",
+					"dp_vco_divsel_six_clk_src"},
+			.num_parents = 3,
+			.ops = &mux_clk_ops,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		},
+	},
+};
+
+static struct clk_hw *mdss_dp_pllcc_7nm[] = {
+	[DP_VCO_CLK] = &dp_vco_clk.hw,
+	[DP_LINK_CLK_DIVSEL_TEN] = &dp_link_clk_divsel_ten.hw,
+	[DP_VCO_DIVIDED_TWO_CLK_SRC] = &dp_vco_divsel_two_clk_src.hw,
+	[DP_VCO_DIVIDED_FOUR_CLK_SRC] = &dp_vco_divsel_four_clk_src.hw,
+	[DP_VCO_DIVIDED_SIX_CLK_SRC] = &dp_vco_divsel_six_clk_src.hw,
+	[DP_VCO_DIVIDED_CLK_SRC_MUX] = &dp_vco_divided_clk_src_mux.clkr.hw,
+};
+
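+/*
+ * Register the DP PLL clock tree: the VCO, the fixed /10 link clock,
+ * the three fixed VCO dividers and the regmap-backed pixel clock mux,
+ * exposed through a onecell clock provider.
+ */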
+int dp_pll_clock_register_7nm(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP, i = 0;
+	struct clk_onecell_data *clk_data;
+	struct clk *clk;
+	struct regmap *regmap;
+	int num_clks = ARRAY_SIZE(mdss_dp_pllcc_7nm);
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+				sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	pll_res->priv = &dp_pdb_7nm;
+	dp_pdb_7nm.pll = pll_res;
+
+	/* Set client data for vco, mux and div clocks */
+	regmap = devm_regmap_init(&pdev->dev, &dp_pixel_mux_regmap_ops,
+			pll_res, &dp_pll_7nm_cfg);
+	if (IS_ERR(regmap)) {
+		pr_err("regmap init failed for DP PLL: %d\n", pll_res->index);
+		return PTR_ERR(regmap);
+	}
+	dp_vco_divided_clk_src_mux.clkr.regmap = regmap;
+	mux_clk_ops = clk_regmap_mux_closest_ops;
+	mux_clk_ops.determine_rate = clk_mux_determine_rate;
+	mux_clk_ops.recalc_rate = mux_recalc_rate;
+
+	dp_vco_clk.priv = pll_res;
+
+	for (i = DP_VCO_CLK; i <= DP_VCO_DIVIDED_CLK_SRC_MUX; i++) {
+		pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+		clk = devm_clk_register(&pdev->dev, mdss_dp_pllcc_7nm[i]);
+		if (IS_ERR(clk)) {
+			pr_err("clk registration failed for DP: %d\n",
+					pll_res->index);
+			rc = -EINVAL;
+			goto clk_reg_fail;
+		}
+		clk_data->clks[i] = clk;
+	}
+
+	rc = of_clk_add_provider(pdev->dev.of_node,
+			of_clk_src_onecell_get, clk_data);
+	if (rc) {
+		pr_err("Clock register failed rc=%d\n", rc);
+		rc = -EPROBE_DEFER;
+		goto clk_reg_fail;
+	} else {
+		pr_debug("SUCCESS\n");
+	}
+	return rc;
+clk_reg_fail:
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.h b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.h
new file mode 100644
index 0000000..c1461bd
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll-7nm.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_DP_PLL_7NM_H
+#define __MDSS_DP_PLL_7NM_H
+
+#define DP_VCO_HSCLK_RATE_1620MHZDIV1000	1620000UL
+#define DP_VCO_HSCLK_RATE_2700MHZDIV1000	2700000UL
+#define DP_VCO_HSCLK_RATE_5400MHZDIV1000	5400000UL
+#define DP_VCO_HSCLK_RATE_8100MHZDIV1000	8100000UL
+
+struct dp_pll_db_7nm {
+	struct mdss_pll_resources *pll;
+
+	/* lane and orientation settings */
+	u8 lane_cnt;
+	u8 orientation;
+
+	/* COM PHY settings */
+	u32 hsclk_sel;
+	u32 dec_start_mode0;
+	u32 div_frac_start1_mode0;
+	u32 div_frac_start2_mode0;
+	u32 div_frac_start3_mode0;
+	u32 integloop_gain0_mode0;
+	u32 integloop_gain1_mode0;
+	u32 vco_tune_map;
+	u32 lock_cmp1_mode0;
+	u32 lock_cmp2_mode0;
+	u32 lock_cmp_en;
+	u32 cmn_config;
+	u32 txn_tran_drv_emp_en;
+
+	/* PHY vco divider */
+	u32 phy_vco_div;
+};
+
+int dp_vco_set_rate_7nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate);
+unsigned long dp_vco_recalc_rate_7nm(struct clk_hw *hw,
+				unsigned long parent_rate);
+long dp_vco_round_rate_7nm(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate);
+int dp_vco_prepare_7nm(struct clk_hw *hw);
+void dp_vco_unprepare_7nm(struct clk_hw *hw);
+int dp_mux_set_parent_7nm(void *context,
+				unsigned int reg, unsigned int val);
+int dp_mux_get_parent_7nm(void *context,
+				unsigned int reg, unsigned int *val);
+#endif /* __MDSS_DP_PLL_7NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dp-pll.h b/drivers/clk/qcom/mdss/mdss-dp-pll.h
new file mode 100644
index 0000000..bb429d8
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dp-pll.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_DP_PLL_H
+#define __MDSS_DP_PLL_H
+
+struct dp_pll_vco_clk {
+	struct clk_hw hw;
+	unsigned long	rate;		/* current vco rate */
+	u64		min_rate;	/* min vco rate */
+	u64		max_rate;	/* max vco rate */
+	void		*priv;
+};
+
+static inline struct dp_pll_vco_clk *to_dp_vco_hw(struct clk_hw *hw)
+{
+	return container_of(hw, struct dp_pll_vco_clk, hw);
+}
+
+int dp_pll_clock_register_14nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int dp_pll_clock_register_10nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int dp_pll_clock_register_7nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+#endif /* __MDSS_DP_PLL_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-20nm-pll-util.c b/drivers/clk/qcom/mdss/mdss-dsi-20nm-pll-util.c
new file mode 100644
index 0000000..e56b264
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-20nm-pll-util.c
@@ -0,0 +1,1004 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+
+#define MMSS_DSI_PHY_PLL_SYS_CLK_CTRL			0x0000
+#define MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN			0x0004
+#define MMSS_DSI_PHY_PLL_CMN_MODE			0x0008
+#define MMSS_DSI_PHY_PLL_IE_TRIM			0x000C
+#define MMSS_DSI_PHY_PLL_IP_TRIM			0x0010
+#define MMSS_DSI_PHY_PLL_PLL_CNTRL			0x0014
+#define MMSS_DSI_PHY_PLL_PLL_PHSEL_CONTROL		0x0018
+#define MMSS_DSI_PHY_PLL_IPTAT_TRIM_VCCA_TX_SEL		0x001C
+#define MMSS_DSI_PHY_PLL_PLL_PHSEL_DC			0x0020
+#define MMSS_DSI_PHY_PLL_PLL_IP_SETI			0x0024
+#define MMSS_DSI_PHY_PLL_CORE_CLK_IN_SYNC_SEL		0x0028
+#define MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN		0x002C
+#define MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN		0x0030
+#define MMSS_DSI_PHY_PLL_PLL_CP_SETI			0x0034
+#define MMSS_DSI_PHY_PLL_PLL_IP_SETP			0x0038
+#define MMSS_DSI_PHY_PLL_PLL_CP_SETP			0x003C
+#define MMSS_DSI_PHY_PLL_ATB_SEL1			0x0040
+#define MMSS_DSI_PHY_PLL_ATB_SEL2			0x0044
+#define MMSS_DSI_PHY_PLL_SYSCLK_EN_SEL_TXBAND		0x0048
+#define MMSS_DSI_PHY_PLL_RESETSM_CNTRL			0x004C
+#define MMSS_DSI_PHY_PLL_RESETSM_CNTRL2			0x0050
+#define MMSS_DSI_PHY_PLL_RESETSM_CNTRL3			0x0054
+#define MMSS_DSI_PHY_PLL_RESETSM_PLL_CAL_COUNT1		0x0058
+#define MMSS_DSI_PHY_PLL_RESETSM_PLL_CAL_COUNT2		0x005C
+#define MMSS_DSI_PHY_PLL_DIV_REF1			0x0060
+#define MMSS_DSI_PHY_PLL_DIV_REF2			0x0064
+#define MMSS_DSI_PHY_PLL_KVCO_COUNT1			0x0068
+#define MMSS_DSI_PHY_PLL_KVCO_COUNT2			0x006C
+#define MMSS_DSI_PHY_PLL_KVCO_CAL_CNTRL			0x0070
+#define MMSS_DSI_PHY_PLL_KVCO_CODE			0x0074
+#define MMSS_DSI_PHY_PLL_VREF_CFG1			0x0078
+#define MMSS_DSI_PHY_PLL_VREF_CFG2			0x007C
+#define MMSS_DSI_PHY_PLL_VREF_CFG3			0x0080
+#define MMSS_DSI_PHY_PLL_VREF_CFG4			0x0084
+#define MMSS_DSI_PHY_PLL_VREF_CFG5			0x0088
+#define MMSS_DSI_PHY_PLL_VREF_CFG6			0x008C
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP1			0x0090
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP2			0x0094
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP3			0x0098
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN			0x009C
+#define MMSS_DSI_PHY_PLL_BGTC				0x00A0
+#define MMSS_DSI_PHY_PLL_PLL_TEST_UPDN			0x00A4
+#define MMSS_DSI_PHY_PLL_PLL_VCO_TUNE			0x00A8
+#define MMSS_DSI_PHY_PLL_DEC_START1			0x00AC
+#define MMSS_DSI_PHY_PLL_PLL_AMP_OS			0x00B0
+#define MMSS_DSI_PHY_PLL_SSC_EN_CENTER			0x00B4
+#define MMSS_DSI_PHY_PLL_SSC_ADJ_PER1			0x00B8
+#define MMSS_DSI_PHY_PLL_SSC_ADJ_PER2			0x00BC
+#define MMSS_DSI_PHY_PLL_SSC_PER1			0x00C0
+#define MMSS_DSI_PHY_PLL_SSC_PER2			0x00C4
+#define MMSS_DSI_PHY_PLL_SSC_STEP_SIZE1			0x00C8
+#define MMSS_DSI_PHY_PLL_SSC_STEP_SIZE2			0x00CC
+#define MMSS_DSI_PHY_PLL_RES_CODE_UP			0x00D0
+#define MMSS_DSI_PHY_PLL_RES_CODE_DN			0x00D4
+#define MMSS_DSI_PHY_PLL_RES_CODE_UP_OFFSET		0x00D8
+#define MMSS_DSI_PHY_PLL_RES_CODE_DN_OFFSET		0x00DC
+#define MMSS_DSI_PHY_PLL_RES_CODE_START_SEG1		0x00E0
+#define MMSS_DSI_PHY_PLL_RES_CODE_START_SEG2		0x00E4
+#define MMSS_DSI_PHY_PLL_RES_CODE_CAL_CSR		0x00E8
+#define MMSS_DSI_PHY_PLL_RES_CODE			0x00EC
+#define MMSS_DSI_PHY_PLL_RES_TRIM_CONTROL		0x00F0
+#define MMSS_DSI_PHY_PLL_RES_TRIM_CONTROL2		0x00F4
+#define MMSS_DSI_PHY_PLL_RES_TRIM_EN_VCOCALDONE		0x00F8
+#define MMSS_DSI_PHY_PLL_FAUX_EN			0x00FC
+
+#define MMSS_DSI_PHY_PLL_DIV_FRAC_START1		0x0100
+#define MMSS_DSI_PHY_PLL_DIV_FRAC_START2		0x0104
+#define MMSS_DSI_PHY_PLL_DIV_FRAC_START3		0x0108
+#define MMSS_DSI_PHY_PLL_DEC_START2			0x010C
+#define MMSS_DSI_PHY_PLL_PLL_RXTXEPCLK_EN		0x0110
+#define MMSS_DSI_PHY_PLL_PLL_CRCTRL			0x0114
+#define MMSS_DSI_PHY_PLL_LOW_POWER_RO_CONTROL		0x013C
+#define MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL		0x0140
+#define MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER		0x0144
+#define MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER		0x0148
+#define MMSS_DSI_PHY_PLL_PLL_VCO_HIGH			0x014C
+#define MMSS_DSI_PHY_PLL_RESET_SM			0x0150
+#define MMSS_DSI_PHY_PLL_MUXVAL			0x0154
+#define MMSS_DSI_PHY_PLL_CORE_RES_CODE_DN		0x0158
+#define MMSS_DSI_PHY_PLL_CORE_RES_CODE_UP		0x015C
+#define MMSS_DSI_PHY_PLL_CORE_VCO_TUNE			0x0160
+#define MMSS_DSI_PHY_PLL_CORE_VCO_TAIL			0x0164
+#define MMSS_DSI_PHY_PLL_CORE_KVCO_CODE		0x0168
+
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL0		0x014
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL1		0x018
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL2		0x01C
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL3		0x020
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL4		0x024
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL5		0x028
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL6		0x02C
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL7		0x030
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL8		0x034
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL9		0x038
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL10		0x03C
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL11		0x040
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL12		0x044
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL13		0x048
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL14		0x04C
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x050
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL16		0x054
+#define MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL17		0x058
+
+#define DSI_PLL_POLL_DELAY_US			1000
+#define DSI_PLL_POLL_TIMEOUT_US			15000
+
+int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_byte_mux_sel(struct mux_clk *clk)
+{
+	return 0;
+}
+
+int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_pixel_mux_sel(struct mux_clk *clk)
+{
+	return 0;
+}
+
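+/*
+ * Cache the KVCO code and VCO tune values calibrated at the first
+ * lock; pll_20nm_override_trim_codes() forces them back in with the
+ * override-enable bits set so a re-lock can skip full calibration.
+ */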
+static void pll_20nm_cache_trim_codes(struct mdss_pll_resources *dsi_pll_res)
+{
+	int rc;
+
+	if (dsi_pll_res->reg_upd)
+		return;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return;
+	}
+
+	dsi_pll_res->cache_pll_trim_codes[0] =
+		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_CORE_KVCO_CODE);
+	dsi_pll_res->cache_pll_trim_codes[1] =
+		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_CORE_VCO_TUNE);
+
+	pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
+		dsi_pll_res->cache_pll_trim_codes[0],
+		dsi_pll_res->cache_pll_trim_codes[1]);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	dsi_pll_res->reg_upd = true;
+}
+
+static void pll_20nm_override_trim_codes(struct mdss_pll_resources *dsi_pll_res)
+{
+	u32 reg_data;
+	void __iomem *pll_base = dsi_pll_res->pll_base;
+
+	/*
+	 * Force the cached trim codes (KVCO code and VCO tune) back
+	 * into the PLL with their override-enable bits set
+	 */
+	reg_data = (dsi_pll_res->cache_pll_trim_codes[0] & 0x3f) | BIT(5);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_KVCO_CODE, reg_data);
+
+	reg_data = (dsi_pll_res->cache_pll_trim_codes[1] & 0x7f) | BIT(7);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCO_TUNE, reg_data);
+}
+
+int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel)
+{
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+	int reg_data;
+
+	pr_debug("bypass_lp_div mux set to %s mode\n",
+				sel ? "indirect" : "direct");
+
+	pr_debug("POST_DIVIDER_CONTROL = 0x%x\n",
+		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL));
+
+	reg_data = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL);
+	reg_data |= BIT(7);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
+			reg_data | (sel << 5));
+	pr_debug("POST_DIVIDER_CONTROL = 0x%x\n",
+		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL));
+
+	return 0;
+}
+
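+/*
+ * The MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRLn registers each queue a pair
+ * of (offset, value) PLL writes that hardware replays on a dynamic
+ * refresh (seamless fps switch); the shadow_* helpers program this
+ * queue instead of writing the PLL registers directly.
+ */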
+int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel)
+{
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+	int reg_data, rem;
+
+	if (!dsi_pll_res->resource_enable) {
+		pr_err("PLL resources disabled. Dynamic fps invalid\n");
+		return -EINVAL;
+	}
+
+	reg_data = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL);
+	reg_data |= BIT(7);
+
+	pr_debug("%d: reg_data = %x\n", __LINE__, reg_data);
+
+	/* Repeat the POST_DIVIDER setting 2 times (4 writes) */
+	for (rem = 0; rem < 2; rem++)
+		MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+			MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL16 + (4 * rem),
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
+			(reg_data | (sel << 5)), (reg_data | (sel << 5)));
+
+	return 0;
+}
+
+int get_bypass_lp_div_mux_sel(struct mux_clk *clk)
+{
+	int mux_mode, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	mux_mode = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL) & BIT(5);
+
+	pr_debug("bypass_lp_div mux mode = %s\n",
+				mux_mode ? "indirect" : "direct");
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return !!mux_mode;
+}
+
+int ndiv_set_div(struct div_clk *clk, int div)
+{
+	int rc, reg_data;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	reg_data = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
+				reg_data | div);
+
+	pr_debug("POST_DIVIDER_CONTROL = 0x%x\n",
+		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL));
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+int shadow_ndiv_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (!dsi_pll_res->resource_enable) {
+		pr_err("PLL resources disabled. Dynamic fps invalid\n");
+		return -EINVAL;
+	}
+
+	pr_debug("%d div=%i\n", __LINE__, div);
+
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL14,
+		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
+		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
+		0x07, (0xB | div));
+
+	return 0;
+}
+
+int ndiv_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(clk->priv, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL) & 0x0F;
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return div;
+}
+
+int fixed_hr_oclk2_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER,
+				(div - 1));
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (!dsi_pll_res->resource_enable) {
+		pr_err("PLL resources disabled. Dynamic fps invalid\n");
+		return -EINVAL;
+	}
+	pr_debug("%d div = %d\n", __LINE__, div);
+
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL5,
+		MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER,
+		MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER,
+		(div - 1), (div - 1));
+
+	return 0;
+}
+
+int fixed_hr_oclk2_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				MMSS_DSI_PHY_PLL_HR_OCLK2_DIVIDER);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return div + 1;
+}
+
+int hr_oclk3_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pr_debug("%d div = %d\n", __LINE__, div);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER,
+				(div - 1));
+	pr_debug("%s: HR_OCLK3_DIVIDER = 0x%x\n", __func__,
+		MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER));
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+int shadow_hr_oclk3_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (!dsi_pll_res->resource_enable) {
+		pr_err("PLL resources disabled. Dynamic fps invalid\n");
+		return -EINVAL;
+	}
+
+	pr_debug("%d div = %d\n", __LINE__, div);
+
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL6,
+		MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER,
+		MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER,
+		(div - 1), (div - 1));
+
+	return 0;
+}
+
+int hr_oclk3_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				MMSS_DSI_PHY_PLL_HR_OCLK3_DIVIDER);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return div + 1;
+}
+
+static bool pll_20nm_is_pll_locked(struct mdss_pll_resources *dsi_pll_res)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((dsi_pll_res->pll_base +
+			MMSS_DSI_PHY_PLL_RESET_SM),
+			status,
+			((status & BIT(5)) > 0),
+			DSI_PLL_POLL_DELAY_US,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("DSI PLL status=%x failed to lock\n", status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((dsi_pll_res->pll_base +
+			MMSS_DSI_PHY_PLL_RESET_SM),
+			status,
+			((status & BIT(6)) > 0),
+			DSI_PLL_POLL_DELAY_US,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("DSI PLL status=%x PLL not ready\n", status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+void __dsi_pll_disable(void __iomem *pll_base)
+{
+	if (!pll_base) {
+		pr_err("Invalid pll base\n");
+		return;
+	}
+	pr_debug("Disabling PHY PLL for PLL_BASE=%p\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x02);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x06);
+}
+
+static void pll_20nm_config_powerdown(void __iomem *pll_base)
+{
+	if (!pll_base) {
+		pr_err("Invalid pll base.\n");
+		return;
+	}
+
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SYS_CLK_CTRL, 0x00);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_CMN_MODE, 0x01);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x82);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN, 0x02);
+}
+
+static int dsi_pll_enable(struct clk *c)
+{
+	int i, rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](dsi_pll_res);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+	/* Disable PLL1 to avoid current leakage while toggling MDSS GDSC */
+	if (dsi_pll_res->pll_1_base)
+		pll_20nm_config_powerdown(dsi_pll_res->pll_1_base);
+
+	if (rc) {
+		mdss_pll_resource_enable(dsi_pll_res, false);
+		pr_err("DSI PLL failed to lock\n");
+	}
+	dsi_pll_res->pll_on = true;
+
+	return rc;
+}
+
+static void dsi_pll_disable(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res->pll_on &&
+		mdss_pll_resource_enable(dsi_pll_res, true)) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return;
+	}
+
+	dsi_pll_res->handoff_resources = false;
+
+	__dsi_pll_disable(dsi_pll_res->pll_base);
+
+	/* Disable PLL1 to avoid current leakage while toggling MDSS GDSC */
+	if (dsi_pll_res->pll_1_base)
+		pll_20nm_config_powerdown(dsi_pll_res->pll_1_base);
+
+	pll_20nm_config_powerdown(dsi_pll_res->pll_base);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	dsi_pll_res->pll_on = false;
+
+	pr_debug("DSI PLL Disabled\n");
+}
+
+static void pll_20nm_config_common_block_1(void __iomem *pll_base)
+{
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x82);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN, 0x2a);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_BIAS_EN_CLKBUFLR_EN, 0x2b);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x02);
+}
+
+static void pll_20nm_config_common_block_2(void __iomem *pll_base)
+{
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SYS_CLK_CTRL, 0x40);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_IE_TRIM, 0x0F);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_IP_TRIM, 0x0F);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_PHSEL_CONTROL, 0x08);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_IPTAT_TRIM_VCCA_TX_SEL, 0x0E);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN, 0x08);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SYSCLK_EN_SEL_TXBAND, 0x4A);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_REF1, 0x00);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_REF2, 0x01);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CNTRL, 0x07);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_KVCO_CAL_CNTRL, 0x1f);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_KVCO_COUNT1, 0x8A);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_VREF_CFG3, 0x10);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_SSC_EN_CENTER, 0x00);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_FAUX_EN, 0x0C);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_RXTXEPCLK_EN, 0x0a);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_LOW_POWER_RO_CONTROL, 0x0f);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_CMN_MODE, 0x00);
+}
+
+static void pll_20nm_config_loop_bw(void __iomem *pll_base)
+{
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_IP_SETI, 0x03);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CP_SETI, 0x3F);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_IP_SETP, 0x03);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CP_SETP, 0x1F);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_CRCTRL, 0x77);
+}
+
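+/*
+ * The VCO runs at 2 * ref_clk * (dec_start + div_frac_start / 2^20).
+ * For example, a 1000 MHz VCO from a 19.2 MHz reference gives
+ * dec_start_multiple = 10^9 * 2^20 / (2 * 19.2 * 10^6) = 27306666,
+ * i.e. dec_start = 26 (0x1a) and div_frac_start = 43690 (0xaaaa).
+ */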
+static void pll_20nm_vco_rate_calc(struct mdss_pll_vco_calc *vco_calc,
+	s64 vco_clk_rate, s64 ref_clk_rate)
+{
+	s64 multiplier = (1 << 20);
+	s64 duration = 1024, pll_comp_val;
+	s64 dec_start_multiple, dec_start;
+	s32 div_frac_start;
+	s64 dec_start1, dec_start2;
+	s32 div_frac_start1, div_frac_start2, div_frac_start3;
+	s64 pll_plllock_cmp1, pll_plllock_cmp2, pll_plllock_cmp3;
+
+	memset(vco_calc, 0, sizeof(*vco_calc));
+	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n", vco_clk_rate,
+		ref_clk_rate);
+
+	dec_start_multiple = div_s64(vco_clk_rate * multiplier,
+					2 * ref_clk_rate);
+	div_s64_rem(dec_start_multiple,
+			multiplier, &div_frac_start);
+
+	dec_start = div_s64(dec_start_multiple, multiplier);
+	dec_start1 = (dec_start & 0x7f) | BIT(7);
+	dec_start2 = ((dec_start & 0x80) >> 7) | BIT(1);
+	div_frac_start1 = (div_frac_start & 0x7f) | BIT(7);
+	div_frac_start2 = ((div_frac_start >> 7) & 0x7f) | BIT(7);
+	div_frac_start3 = ((div_frac_start >> 14) & 0x3f) | BIT(6);
+	pll_comp_val = (div_s64(dec_start_multiple * 2 * duration,
+				10 * multiplier)) - 1;
+	pll_plllock_cmp1 = pll_comp_val & 0xff;
+	pll_plllock_cmp2 = (pll_comp_val >> 8) & 0xff;
+	pll_plllock_cmp3 = (pll_comp_val >> 16) & 0xff;
+
+	pr_debug("dec_start_multiple = 0x%llx\n", dec_start_multiple);
+	pr_debug("dec_start = 0x%llx, div_frac_start = 0x%x\n",
+			dec_start, div_frac_start);
+	pr_debug("dec_start1 = 0x%llx, dec_start2 = 0x%llx\n",
+			dec_start1, dec_start2);
+	pr_debug("div_frac_start1 = 0x%x, div_frac_start2 = 0x%x\n",
+			div_frac_start1, div_frac_start2);
+	pr_debug("div_frac_start3 = 0x%x\n", div_frac_start3);
+	pr_debug("pll_comp_val = 0x%llx\n", pll_comp_val);
+	pr_debug("pll_plllock_cmp1 = 0x%llx, pll_plllock_cmp2 = 0x%llx\n",
+			pll_plllock_cmp1, pll_plllock_cmp2);
+	pr_debug("pll_plllock_cmp3 = 0x%llx\n",	pll_plllock_cmp3);
+
+	/* Assign to vco struct */
+	vco_calc->div_frac_start1 = div_frac_start1;
+	vco_calc->div_frac_start2 = div_frac_start2;
+	vco_calc->div_frac_start3 = div_frac_start3;
+	vco_calc->dec_start1 = dec_start1;
+	vco_calc->dec_start2 = dec_start2;
+	vco_calc->pll_plllock_cmp1 = pll_plllock_cmp1;
+	vco_calc->pll_plllock_cmp2 = pll_plllock_cmp2;
+	vco_calc->pll_plllock_cmp3 = pll_plllock_cmp3;
+}
+
+static void pll_20nm_config_vco_rate(void __iomem *pll_base,
+	struct mdss_pll_vco_calc *vco_calc)
+{
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_FRAC_START1,
+		vco_calc->div_frac_start1);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_FRAC_START2,
+		vco_calc->div_frac_start2);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DIV_FRAC_START3,
+		vco_calc->div_frac_start3);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DEC_START1,
+		vco_calc->dec_start1);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_DEC_START2,
+		vco_calc->dec_start2);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP1,
+		vco_calc->pll_plllock_cmp1);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP2,
+		vco_calc->pll_plllock_cmp2);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP3,
+		vco_calc->pll_plllock_cmp3);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN, 0x01);
+}
+
+int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
+{
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	dsi_pll_res->vco_current_rate = rate;
+	dsi_pll_res->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	return 0;
+}
+
+int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
+		unsigned long rate)
+{
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+	struct mdss_pll_vco_calc vco_calc;
+	s64 vco_clk_rate = rate;
+	u32 rem;
+
+	if (!dsi_pll_res->resource_enable) {
+		pr_err("PLL resources disabled. Dynamic fps invalid\n");
+		return -EINVAL;
+	}
+
+	pr_debug("req vco set rate: %lld\n", vco_clk_rate);
+
+	pll_20nm_override_trim_codes(dsi_pll_res);
+
+	/* div fraction, start and comp calculations */
+	pll_20nm_vco_rate_calc(&vco_calc, vco_clk_rate,
+		dsi_pll_res->vco_ref_clk_rate);
+
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL0,
+		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
+		MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN,
+		0xB1, 0);
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL1,
+		MMSS_DSI_PHY_PLL_PLLLOCK_CMP1,
+		MMSS_DSI_PHY_PLL_PLLLOCK_CMP2,
+		vco_calc.pll_plllock_cmp1, vco_calc.pll_plllock_cmp2);
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL2,
+		MMSS_DSI_PHY_PLL_PLLLOCK_CMP3,
+		MMSS_DSI_PHY_PLL_DEC_START1,
+		vco_calc.pll_plllock_cmp3, vco_calc.dec_start1);
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL3,
+		MMSS_DSI_PHY_PLL_DEC_START2,
+		MMSS_DSI_PHY_PLL_DIV_FRAC_START1,
+		vco_calc.dec_start2, vco_calc.div_frac_start1);
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL4,
+		MMSS_DSI_PHY_PLL_DIV_FRAC_START2,
+		MMSS_DSI_PHY_PLL_DIV_FRAC_START3,
+		vco_calc.div_frac_start2, vco_calc.div_frac_start3);
+	/* Method 2 - Auto PLL calibration */
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL7,
+		MMSS_DSI_PHY_PLL_PLL_VCO_TUNE,
+		MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN,
+		0, 0x0D);
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL8,
+		MMSS_DSI_PHY_PLL_POST_DIVIDER_CONTROL,
+		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
+		0xF0, 0x07);
+
+	/*
+	 * RESETSM_CNTRL3 has to be set 12 times (6 register writes, each
+	 * write programming the value twice). Five of those writes run in
+	 * the loop below; the remaining ones are handled elsewhere (one
+	 * above and one in the shadow_bypass path).
+	 */
+	for (rem = 0; rem < 5; rem++) {
+		MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+				MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL9 + (4 * rem),
+				MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
+				MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
+				0x07, 0x07);
+	}
+
+	MDSS_DYN_PLL_REG_W(dsi_pll_res->dyn_pll_base,
+		MMSS_DSI_DYNAMIC_REFRESH_PLL_CTRL15,
+		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
+		MMSS_DSI_PHY_PLL_RESETSM_CNTRL3,
+		0x03, 0x03);
+
+	/* memory barrier */
+	wmb();
+	return 0;
+}
+
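+/*
+ * Inverse of pll_20nm_vco_rate_calc(): reassemble dec_start and the
+ * 20-bit div_frac_start from their split register fields and return
+ * 2 * ref_clk * (dec_start + div_frac_start / 2^20).
+ */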
+unsigned long pll_20nm_vco_get_rate(struct clk *c)
+{
+	u64 vco_rate, multiplier = (1 << 20);
+	s32 div_frac_start;
+	u32 dec_start;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	u64 ref_clk = vco->ref_clk_rate;
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return 0;
+	}
+
+	dec_start = (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_DEC_START2) & BIT(0)) << 7;
+	dec_start |= (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_DEC_START1) & 0x7f);
+	pr_debug("dec_start = 0x%x\n", dec_start);
+
+	div_frac_start = (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_DIV_FRAC_START3) & 0x3f) << 14;
+	div_frac_start |= (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_DIV_FRAC_START2) & 0x7f) << 7;
+	div_frac_start |= MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			MMSS_DSI_PHY_PLL_DIV_FRAC_START1) & 0x7f;
+	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
+
+	vco_rate = ref_clk * 2 * dec_start;
+	vco_rate += ((ref_clk * 2 * div_frac_start) / multiplier);
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return (unsigned long)vco_rate;
+}
+
+long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
+
+enum handoff pll_20nm_vco_handoff(struct clk *c)
+{
+	int rc;
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return HANDOFF_DISABLED_CLK;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return ret;
+	}
+
+	if (pll_20nm_is_pll_locked(dsi_pll_res)) {
+		dsi_pll_res->handoff_resources = true;
+		dsi_pll_res->pll_on = true;
+		c->rate = pll_20nm_vco_get_rate(c);
+		ret = HANDOFF_ENABLED_CLK;
+		dsi_pll_res->vco_locking_rate = c->rate;
+		dsi_pll_res->is_init_locked = true;
+		pll_20nm_cache_trim_codes(dsi_pll_res);
+		pr_debug("handoff vco_locking_rate=%llu\n",
+			dsi_pll_res->vco_locking_rate);
+	} else {
+		mdss_pll_resource_enable(dsi_pll_res, false);
+		dsi_pll_res->vco_locking_rate = 0;
+		dsi_pll_res->is_init_locked = false;
+	}
+
+	return ret;
+}
+
+int pll_20nm_vco_prepare(struct clk *c)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res) {
+		pr_err("Dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	if ((dsi_pll_res->vco_cached_rate != 0)
+	    && (dsi_pll_res->vco_cached_rate == c->rate)) {
+		rc = c->ops->set_rate(c, dsi_pll_res->vco_cached_rate);
+		if (rc) {
+			pr_err("vco_set_rate failed. rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(c);
+
+error:
+	return rc;
+}
+
+void pll_20nm_vco_unprepare(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res) {
+		pr_err("Dsi pll resources are not available\n");
+		return;
+	}
+
+	dsi_pll_res->vco_cached_rate = c->rate;
+	dsi_pll_disable(c);
+}
+
+static void pll_20nm_config_resetsm(void __iomem *pll_base)
+{
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL, 0x24);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL2, 0x07);
+}
+
+static void pll_20nm_config_vco_start(void __iomem *pll_base)
+{
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_VCOTAIL_EN, 0x03);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x02);
+	udelay(10);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL3, 0x03);
+}
+
+static void pll_20nm_config_bypass_cal(void __iomem *pll_base)
+{
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_RESETSM_CNTRL, 0xac);
+	MDSS_PLL_REG_W(pll_base, MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN, 0x28);
+}
+
+static int pll_20nm_vco_relock(struct mdss_pll_resources *dsi_pll_res)
+{
+	int rc = 0;
+
+	pll_20nm_override_trim_codes(dsi_pll_res);
+	pll_20nm_config_bypass_cal(dsi_pll_res->pll_base);
+	pll_20nm_config_vco_start(dsi_pll_res->pll_base);
+
+	if (!pll_20nm_is_pll_locked(dsi_pll_res)) {
+		pr_err("DSI PLL re-lock failed\n");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int pll_20nm_vco_init_lock(struct mdss_pll_resources *dsi_pll_res)
+{
+	int rc = 0;
+
+	pll_20nm_config_resetsm(dsi_pll_res->pll_base);
+	pll_20nm_config_vco_start(dsi_pll_res->pll_base);
+
+	if (!pll_20nm_is_pll_locked(dsi_pll_res)) {
+		pr_err("DSI PLL init lock failed\n");
+		rc = -EINVAL;
+		goto init_lock_err;
+	}
+
+	pll_20nm_cache_trim_codes(dsi_pll_res);
+
+init_lock_err:
+	return rc;
+}
+
+int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res)
+{
+	int rc = 0;
+	struct mdss_pll_vco_calc vco_calc;
+
+	if (!dsi_pll_res) {
+		pr_err("Invalid PLL resources\n");
+		return -EINVAL;
+	}
+
+	pll_20nm_config_common_block_1(dsi_pll_res->pll_1_base);
+	pll_20nm_config_common_block_1(dsi_pll_res->pll_base);
+	pll_20nm_config_common_block_2(dsi_pll_res->pll_base);
+	pll_20nm_config_loop_bw(dsi_pll_res->pll_base);
+
+	pll_20nm_vco_rate_calc(&vco_calc, dsi_pll_res->vco_current_rate,
+		dsi_pll_res->vco_ref_clk_rate);
+	pll_20nm_config_vco_rate(dsi_pll_res->pll_base, &vco_calc);
+
+	pr_debug("init lock=%d prev vco_rate=%llu, new vco_rate=%llu\n",
+		dsi_pll_res->is_init_locked, dsi_pll_res->vco_locking_rate,
+		dsi_pll_res->vco_current_rate);
+	/*
+	 * Run the auto-lock sequence either for initial boot-up locking
+	 * or when the vco rate changes. Otherwise, just use the stored
+	 * codes and bypass calibration.
+	 */
+	if (!dsi_pll_res->is_init_locked || (dsi_pll_res->vco_locking_rate !=
+			dsi_pll_res->vco_current_rate)) {
+		rc = pll_20nm_vco_init_lock(dsi_pll_res);
+		dsi_pll_res->is_init_locked = (rc) ? false : true;
+	} else {
+		rc = pll_20nm_vco_relock(dsi_pll_res);
+	}
+
+	dsi_pll_res->vco_locking_rate = (rc) ? 0 :
+		dsi_pll_res->vco_current_rate;
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
new file mode 100644
index 0000000..b84fcaf
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -0,0 +1,1643 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include "mdss-dsi-pll.h"
+#include "mdss-pll.h"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+#define CREATE_TRACE_POINTS
+#include "mdss_pll_trace.h"
+
+#define VCO_DELAY_USEC 1
+
+#define MHZ_250		250000000UL
+#define MHZ_500		500000000UL
+#define MHZ_1000	1000000000UL
+#define MHZ_1100	1100000000UL
+#define MHZ_1900	1900000000UL
+#define MHZ_3000	3000000000UL
+
+/* Register Offsets from PLL base address */
+#define PLL_ANALOG_CONTROLS_ONE			0x000
+#define PLL_ANALOG_CONTROLS_TWO			0x004
+#define PLL_INT_LOOP_SETTINGS			0x008
+#define PLL_INT_LOOP_SETTINGS_TWO		0x00c
+#define PLL_ANALOG_CONTROLS_THREE		0x010
+#define PLL_ANALOG_CONTROLS_FOUR		0x014
+#define PLL_INT_LOOP_CONTROLS			0x018
+#define PLL_DSM_DIVIDER				0x01c
+#define PLL_FEEDBACK_DIVIDER			0x020
+#define PLL_SYSTEM_MUXES			0x024
+#define PLL_FREQ_UPDATE_CONTROL_OVERRIDES	0x028
+#define PLL_CMODE				0x02c
+#define PLL_CALIBRATION_SETTINGS		0x030
+#define PLL_BAND_SEL_CAL_TIMER_LOW		0x034
+#define PLL_BAND_SEL_CAL_TIMER_HIGH		0x038
+#define PLL_BAND_SEL_CAL_SETTINGS		0x03c
+#define PLL_BAND_SEL_MIN			0x040
+#define PLL_BAND_SEL_MAX			0x044
+#define PLL_BAND_SEL_PFILT			0x048
+#define PLL_BAND_SEL_IFILT			0x04c
+#define PLL_BAND_SEL_CAL_SETTINGS_TWO		0x050
+#define PLL_BAND_SEL_CAL_SETTINGS_THREE		0x054
+#define PLL_BAND_SEL_CAL_SETTINGS_FOUR		0x058
+#define PLL_BAND_SEL_ICODE_HIGH			0x05c
+#define PLL_BAND_SEL_ICODE_LOW			0x060
+#define PLL_FREQ_DETECT_SETTINGS_ONE		0x064
+#define PLL_PFILT				0x07c
+#define PLL_IFILT				0x080
+#define PLL_GAIN				0x084
+#define PLL_ICODE_LOW				0x088
+#define PLL_ICODE_HIGH				0x08c
+#define PLL_LOCKDET				0x090
+#define PLL_OUTDIV				0x094
+#define PLL_FASTLOCK_CONTROL			0x098
+#define PLL_PASS_OUT_OVERRIDE_ONE		0x09c
+#define PLL_PASS_OUT_OVERRIDE_TWO		0x0a0
+#define PLL_CORE_OVERRIDE			0x0a4
+#define PLL_CORE_INPUT_OVERRIDE			0x0a8
+#define PLL_RATE_CHANGE				0x0ac
+#define PLL_PLL_DIGITAL_TIMERS			0x0b0
+#define PLL_PLL_DIGITAL_TIMERS_TWO		0x0b4
+#define PLL_DEC_FRAC_MUXES			0x0c8
+#define PLL_DECIMAL_DIV_START_1			0x0cc
+#define PLL_FRAC_DIV_START_LOW_1		0x0d0
+#define PLL_FRAC_DIV_START_MID_1		0x0d4
+#define PLL_FRAC_DIV_START_HIGH_1		0x0d8
+#define PLL_MASH_CONTROL			0x0ec
+#define PLL_SSC_MUX_CONTROL			0x108
+#define PLL_SSC_STEPSIZE_LOW_1			0x10c
+#define PLL_SSC_STEPSIZE_HIGH_1			0x110
+#define PLL_SSC_DIV_PER_LOW_1			0x114
+#define PLL_SSC_DIV_PER_HIGH_1			0x118
+#define PLL_SSC_DIV_ADJPER_LOW_1		0x11c
+#define PLL_SSC_DIV_ADJPER_HIGH_1		0x120
+#define PLL_SSC_CONTROL				0x13c
+#define PLL_PLL_OUTDIV_RATE			0x140
+#define PLL_PLL_LOCKDET_RATE_1			0x144
+#define PLL_PLL_PROP_GAIN_RATE_1		0x14c
+#define PLL_PLL_BAND_SET_RATE_1			0x154
+#define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x15c
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x164
+#define PLL_FASTLOCK_EN_BAND			0x16c
+#define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x17c
+#define PLL_PLL_LOCK_OVERRIDE			0x180
+#define PLL_PLL_LOCK_DELAY			0x184
+#define PLL_PLL_LOCK_MIN_DELAY			0x188
+#define PLL_CLOCK_INVERTERS			0x18c
+#define PLL_SPARE_AND_JPC_OVERRIDES		0x190
+#define PLL_BIAS_CONTROL_1			0x194
+#define PLL_BIAS_CONTROL_2			0x198
+#define PLL_ALOG_OBSV_BUS_CTRL_1		0x19c
+#define PLL_COMMON_STATUS_ONE			0x1a0
+
+/* Register Offsets from PHY base address */
+#define PHY_CMN_CLK_CFG0	0x010
+#define PHY_CMN_CLK_CFG1	0x014
+#define PHY_CMN_RBUF_CTRL	0x01c
+#define PHY_CMN_PLL_CNTRL	0x038
+#define PHY_CMN_CTRL_0		0x024
+
+/* Bit definition of SSC control registers */
+#define SSC_CENTER		BIT(0)
+#define SSC_EN			BIT(1)
+#define SSC_FREQ_UPDATE		BIT(2)
+#define SSC_FREQ_UPDATE_MUX	BIT(3)
+#define SSC_UPDATE_SSC		BIT(4)
+#define SSC_UPDATE_SSC_MUX	BIT(5)
+#define SSC_START		BIT(6)
+#define SSC_START_MUX		BIT(7)
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_MAX
+};
+
+struct dsi_pll_regs {
+	u32 pll_prop_gain_rate;
+	u32 pll_lockdet_rate;
+	u32 decimal_div_start;
+	u32 frac_div_start_low;
+	u32 frac_div_start_mid;
+	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
+	u32 ssc_stepsize_low;
+	u32 ssc_stepsize_high;
+	u32 ssc_div_per_low;
+	u32 ssc_div_per_high;
+	u32 ssc_adjper_low;
+	u32 ssc_adjper_high;
+	u32 ssc_control;
+};
+
+struct dsi_pll_config {
+	u32 ref_freq;
+	bool div_override;
+	u32 output_div;
+	bool ignore_frac;
+	bool disable_prescaler;
+	bool enable_ssc;
+	bool ssc_center;
+	u32 dec_bits;
+	u32 frac_bits;
+	u32 lock_timer;
+	u32 ssc_freq;
+	u32 ssc_offset;
+	u32 ssc_adj_per;
+	u32 thresh_cycles;
+	u32 refclk_cycles;
+};
+
+struct dsi_pll_10nm {
+	struct mdss_pll_resources *rsc;
+	struct dsi_pll_config pll_configuration;
+	struct dsi_pll_regs reg_setup;
+};
+
+static inline int pll_reg_read(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = MDSS_PLL_REG_R(rsc->pll_base, reg);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int pll_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(rsc->pll_base, reg, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_read(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(rsc->phy_base, reg, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_update_bits_sub(struct mdss_pll_resources *rsc,
+		unsigned int reg, unsigned int mask, unsigned int val)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	reg_val &= ~mask;
+	reg_val |= (val & mask);
+	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+	return 0;
+}
+
+static inline int phy_reg_update_bits(void *context, unsigned int reg,
+				unsigned int mask, unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = phy_reg_update_bits_sub(rsc, reg, mask, val);
+	if (!rc && rsc->slave)
+		rc = phy_reg_update_bits_sub(rsc->slave, reg, mask, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int pclk_mux_read_sel(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc)
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+	else
+		*val = (MDSS_PLL_REG_R(rsc->phy_base, reg) & 0x3);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+static inline int pclk_mux_write_sel_sub(struct mdss_pll_resources *rsc,
+				unsigned int reg, unsigned int val)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	reg_val &= ~0x03;
+	reg_val |= val;
+
+	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+	return 0;
+}
+
+static inline int pclk_mux_write_sel(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pclk_mux_write_sel_sub(rsc, reg, val);
+	if (!rc && rsc->slave)
+		rc = pclk_mux_write_sel_sub(rsc->slave, reg, val);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	/*
+	 * Cache the current parent index for cases where the parent is not
+	 * changing but the rate is. In that case the clock framework won't
+	 * call set_parent and hence the dsiclk_sel bit won't be programmed,
+	 * e.g. the dfps update use case.
+	 */
+	rsc->cached_cfg1 = val;
+
+	return rc;
+}
+
+static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
+static struct dsi_pll_10nm plls[DSI_PLL_MAX];
+
+static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
+{
+	u32 reg;
+	struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
+
+	if (!rsc)
+		return;
+
+	/* Only DSI PLL0 can act as a master */
+	if (rsc->index != DSI_PLL_0)
+		return;
+
+	/* default configuration: source is either internal or ref clock */
+	rsc->slave = NULL;
+
+	if (!orsc) {
+		pr_warn("slave PLL unavilable, assuming standalone config\n");
+		return;
+	}
+
+	/* check to see if the source of DSI1 PLL bitclk is set to external */
+	reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
+	reg &= (BIT(2) | BIT(3));
+	if (reg == 0x04)
+		rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
+
+	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
+}
+
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll,
+				 struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = 19200000;
+	config->output_div = 1;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 5000;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->div_override = false;
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+	config->enable_ssc = rsc->ssc_en;
+	config->ssc_center = rsc->ssc_center;
+
+	if (config->enable_ssc) {
+		if (rsc->ssc_freq)
+			config->ssc_freq = rsc->ssc_freq;
+		if (rsc->ssc_ppm)
+			config->ssc_offset = rsc->ssc_ppm;
+	}
+
+	dsi_pll_config_slave(rsc);
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll,
+				  struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = rsc->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = rsc->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	div_u64_rem(dec_multiple, multiplier, &frac);
+
+	dec = div_u64(dec_multiple, multiplier);
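+	/*
+	 * Worked example with assumed numbers: for a hypothetical 1.5 GHz
+	 * VCO target, fref = 19.2 MHz and the prescaler enabled (divider =
+	 * 38.4 MHz), pll_freq / divider = 39.0625, so dec = 39 and
+	 * frac = 0.0625 * 2^18 = 16384.
+	 */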
+
+	if (pll_freq <= MHZ_1900)
+		regs->pll_prop_gain_rate = 8;
+	else if (pll_freq <= MHZ_3000)
+		regs->pll_prop_gain_rate = 10;
+	else
+		regs->pll_prop_gain_rate = 12;
+	if (pll_freq < MHZ_1100)
+		regs->pll_clock_inverters = 8;
+	else
+		regs->pll_clock_inverters = 0;
+
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll,
+		  struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		pr_debug("SSC not enabled\n");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
+
+	frac = regs->frac_div_start_low |
+			(regs->frac_div_start_mid << 8) |
+			(regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
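+	/*
+	 * Illustrative numbers (assumed defaults): with ref_freq = 19.2 MHz,
+	 * ssc_freq = 31500 and ssc_adj_per = 2, ssc_per starts out as
+	 * DIV_ROUND_CLOSEST(19200000, 31500) / 2 - 1 = 304 and is trimmed
+	 * to 302 so that (ssc_per + 1) divides evenly by (ssc_adj_per + 1).
+	 */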
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+			regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll,
+		struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1,
+				regs->ssc_stepsize_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1,
+				regs->ssc_stepsize_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1,
+				regs->ssc_div_per_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1,
+				regs->ssc_div_per_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_LOW_1,
+				regs->ssc_adjper_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_ADJPER_HIGH_1,
+				regs->ssc_adjper_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL,
+				SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
+				  struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x80);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
+	MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SET_RATE_1, 0xc0);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
+	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
+	MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_init_val(struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x10);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS, 0x3f);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS_TWO, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FOUR, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_CONTROLS, 0x80);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_UPDATE_CONTROL_OVERRIDES, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_LOW, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_HIGH, 0x02);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS, 0x82);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MIN, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MAX, 0xff);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_PFILT, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_IFILT, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_TWO, 0x25);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_FOUR, 0x4f);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_HIGH, 0x0a);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_LOW, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_GAIN, 0x42);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_LOW, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_HIGH, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_LOCKDET, 0x30);
+	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_CONTROL, 0x04);
+	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_ONE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_TWO, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_RATE_CHANGE, 0x01);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS, 0x08);
+	MDSS_PLL_REG_W(pll_base, PLL_DEC_FRAC_MUXES, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_MASH_CONTROL, 0x03);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_MUX_CONTROL, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_EN_BAND, 0x03);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MUX, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_MIN_DELAY, 0x19);
+	MDSS_PLL_REG_W(pll_base, PLL_SPARE_AND_JPC_OVERRIDES, 0x0);
+	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_2, 0x20);
+	MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_CTRL_1, 0x0);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll,
+			   struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
+	MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
+		       reg->decimal_div_start);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
+		       reg->frac_div_start_low);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
+		       reg->frac_div_start_mid);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
+		       reg->frac_div_start_high);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
+	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x10);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+}
+
+static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+	struct dsi_pll_10nm *pll;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	if (rsc->pll_on)
+		return 0;
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rsc->vco_current_rate = rate;
+	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	dsi_pll_init_val(rsc);
+
+	dsi_pll_setup_config(pll, rsc);
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	dsi_pll_calc_ssc(pll, rsc);
+
+	dsi_pll_commit(pll, rsc);
+
+	dsi_pll_config_hzindep_reg(pll, rsc);
+
+	dsi_pll_ssc_commit(pll, rsc);
+
+	/* flush, ensure all register writes are done */
+	wmb();
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
+{
+	int rc;
+	u32 status;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc)
+		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+			pll->index, status);
+
+	return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
+{
+	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
+{
+	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
+	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
+{
+	u32 data;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
+}
+
+static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
+{
+	u32 data;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5)));
+}
+
+static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
+{
+	int rc;
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	dsi_pll_enable_pll_bias(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_pll_bias(rsc->slave);
+
+	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
+	if (rsc->slave)
+		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
+				0x03, rsc->cached_cfg1);
+	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
+
+	/* Start PLL */
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_10nm_lock_status(rsc);
+	if (rc) {
+		pr_err("PLL(%d) lock failed\n", rsc->index);
+		goto error;
+	}
+
+	rsc->pll_on = true;
+
+	dsi_pll_enable_global_clk(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_global_clk(rsc->slave);
+
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+	if (rsc->slave)
+		MDSS_PLL_REG_W(rsc->slave->phy_base, PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+	return rc;
+}
+
+static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
+{
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(rsc);
+}
+
+static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
+{
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc->pll_on &&
+	    mdss_pll_resource_enable(rsc, true)) {
+		pr_err("failed to enable pll (%d) resources\n", rsc->index);
+		return;
+	}
+
+	rsc->handoff_resources = false;
+
+	pr_debug("stop PLL (%d)\n", rsc->index);
+
+	/*
+	 * To avoid any stray glitches while
+	 * abruptly powering down the PLL
+	 * make sure to gate the clock using
+	 * the clock enable bit before powering
+	 * down the PLL
+	 */
+	dsi_pll_disable_global_clk(rsc);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(rsc);
+	if (rsc->slave) {
+		dsi_pll_disable_global_clk(rsc->slave);
+		dsi_pll_disable_sub(rsc->slave);
+	}
+	/* flush, ensure all register writes are done */
+	wmb();
+	rsc->pll_on = false;
+}
+
+long vco_10nm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	*parent_rate = rrate;
+
+	return rrate;
+}
+
+static void vco_10nm_unprepare(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("dsi pll resources not available\n");
+		return;
+	}
+
+	/*
+	 * During unprepare in the continuous splash use case we want the
+	 * driver to pick all dividers instead of retaining bootloader
+	 * configurations.
+	 */
+	if (!pll->handoff_resources) {
+		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
+							PHY_CMN_CLK_CFG0);
+		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
+							PLL_PLL_OUTDIV_RATE);
+		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
+					pll->cached_cfg1, pll->cached_outdiv);
+
+		pll->vco_cached_rate = clk_hw_get_rate(hw);
+	}
+
+	/*
+	 * When the continuous splash screen feature is enabled, we need to
+	 * cache the mux configuration for the pixel_clk_src mux clock. The
+	 * clock framework does not call back to re-configure the mux value
+	 * if it does not change. For such use cases, we need to ensure that
+	 * the cached value is programmed prior to the PLL being locked.
+	 */
+	if (pll->handoff_resources)
+		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
+							PHY_CMN_CLK_CFG1);
+	dsi_pll_disable(vco);
+	mdss_pll_resource_enable(pll, false);
+}
+
+static int vco_10nm_prepare(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	/* Skip vco recalculation for continuous splash use case */
+	if (pll->handoff_resources)
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("failed to enable pll (%d) resource, rc=%d\n",
+		       pll->index, rc);
+		return rc;
+	}
+
+	if ((pll->vco_cached_rate != 0) &&
+	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+				pll->vco_cached_rate);
+		if (rc) {
+			pr_err("pll(%d) set_rate failed, rc=%d\n",
+			       pll->index, rc);
+			mdss_pll_resource_enable(pll, false);
+			return rc;
+		}
+		pr_debug("cfg0=%d, cfg1=%d\n", pll->cached_cfg0,
+			pll->cached_cfg1);
+		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
+					pll->cached_cfg0);
+		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
+					pll->cached_outdiv);
+	}
+	MDSS_PLL_ATRACE_BEGIN("pll_lock");
+	trace_mdss_pll_lock_start((u64)pll->vco_cached_rate,
+			pll->vco_current_rate,
+			pll->cached_cfg0, pll->cached_cfg1,
+			pll->cached_outdiv, pll->resource_ref_cnt);
+	rc = dsi_pll_enable(vco);
+	MDSS_PLL_ATRACE_END("pll_lock");
+	if (rc) {
+		mdss_pll_resource_enable(pll, false);
+		pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+	int rc;
+	u64 ref_clk = vco->ref_clk_rate;
+	u64 vco_rate;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u32 outdiv;
+	u64 pll_freq, tmp64;
+
+	if (!vco->priv)
+		pr_err("vco priv is null\n");
+
+	if (!pll) {
+		pr_err("pll is null\n");
+		return 0;
+	}
+
+	/*
+	 * Calculate the vco rate from HW registers only for handoff cases.
+	 * For other cases where a vco_10nm_set_rate() has already been
+	 * called, just return the rate that was set earlier. This is due
+	 * to the fact that recalculating VCO rate requires us to read the
+	 * correct value of the pll_out_div divider clock, which is only set
+	 * afterwards.
+	 */
+	if (pll->vco_current_rate != 0) {
+		pr_debug("returning vco rate = %lld\n", pll->vco_current_rate);
+		return pll->vco_current_rate;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("failed to enable pll(%d) resource, rc=%d\n",
+		       pll->index, rc);
+		return 0;
+	}
+
+	if (!dsi_pll_10nm_lock_status(pll))
+		pll->handoff_resources = true;
+
+	dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
+	dec &= 0xFF;
+
+	frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
+		  0xFF) <<
+		8);
+	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
+		  0x3) <<
+		16);
+
+	/* OUTDIV_1:0 field is (log(outdiv, 2)) */
+	outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+	outdiv &= 0x3;
+	outdiv = 1 << outdiv;
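+	/* e.g. a register value of 2 decodes to outdiv = 1 << 2 = 4 */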
+
+	/*
+	 * TODO:
+	 *	1. Assumes prescaler is disabled
+	 *	2. Multiplier is 2^18. It should be 2^(num_of_frac_bits)
+	 */
+	multiplier = 1 << 18;
+	pll_freq = dec * (ref_clk * 2);
+	tmp64 = (ref_clk * 2 * frac);
+	pll_freq += div_u64(tmp64, multiplier);
+
+	vco_rate = div_u64(pll_freq, outdiv);
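+	/*
+	 * Illustrative example (assumed register contents): dec = 62,
+	 * frac = 0 and outdiv = 1 with the 19.2 MHz reference give
+	 * pll_freq = 62 * 38.4 MHz = 2380.8 MHz = vco_rate.
+	 */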
+
+	pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
+		 dec, frac, outdiv, vco_rate);
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return (unsigned long)vco_rate;
+}
+
+static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = context;
+	u32 reg_val;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	*div = (reg_val & 0xF0) >> 4;
+
+	/*
+	 * In the common clock framework the divider value is interpreted
+	 * as one less, hence we return one less for all dividers except
+	 * when it is zero.
+	 */
+	if (*div != 0)
+		*div -= 1;
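+	/* e.g. a raw CFG0[7:4] value of 4 is reported to the framework as 3 */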
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	reg_val &= ~0xF0;
+	reg_val |= (div << 4);
+	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = context;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+	/*
+	 * In the common clock framework the divider value provided is one
+	 * less, hence adjust the divider value by one prior to writing it
+	 * to hardware.
+	 */
+	div++;
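+	/* e.g. the framework passes 3 for a divide-by-4; 4 reaches CFG0[7:4] */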
+	pixel_clk_set_div_sub(pll, div);
+	if (pll->slave)
+		pixel_clk_set_div_sub(pll->slave, div);
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return 0;
+}
+
+static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = context;
+	u32 reg_val;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	*div = (reg_val & 0x0F);
+
+	/*
+	 * In the common clock framework the divider value is interpreted
+	 * as one less, hence we return one less for all dividers except
+	 * when it is zero.
+	 */
+	if (*div != 0)
+		*div -= 1;
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	reg_val &= ~0x0F;
+	reg_val |= div;
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
+{
+	int rc;
+	struct mdss_pll_resources *rsc = context;
+	struct dsi_pll_10nm *pll;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * In the common clock framework the divider value provided is one
+	 * less, hence adjust the divider value by one prior to writing it
+	 * to hardware.
+	 */
+	div++;
+
+	bit_clk_set_div_sub(rsc, div);
+	/* For slave PLL, this divider always should be set to 1 */
+	if (rsc->slave)
+		bit_clk_set_div_sub(rsc->slave, 1);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static struct regmap_config dsi_pll_10nm_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7c0,
+};
+
+static struct regmap_bus pll_regmap_bus = {
+	.reg_write = pll_reg_write,
+	.reg_read = pll_reg_read,
+};
+
+static struct regmap_bus pclk_src_mux_regmap_bus = {
+	.reg_read = pclk_mux_read_sel,
+	.reg_write = pclk_mux_write_sel,
+};
+
+static struct regmap_bus pclk_src_regmap_bus = {
+	.reg_write = pixel_clk_set_div,
+	.reg_read = pixel_clk_get_div,
+};
+
+static struct regmap_bus bitclk_src_regmap_bus = {
+	.reg_write = bit_clk_set_div,
+	.reg_read = bit_clk_get_div,
+};
+
+static const struct clk_ops clk_ops_vco_10nm = {
+	.recalc_rate = vco_10nm_recalc_rate,
+	.set_rate = vco_10nm_set_rate,
+	.round_rate = vco_10nm_round_rate,
+	.prepare = vco_10nm_prepare,
+	.unprepare = vco_10nm_unprepare,
+};
+
+static struct regmap_bus mdss_mux_regmap_bus = {
+	.reg_write = mdss_set_mux_sel,
+	.reg_read = mdss_get_mux_sel,
+};
+
+/*
+ * Clock tree for generating DSI byte and pixel clocks.
+ *
+ *
+ *                  +---------------+
+ *                  |    vco_clk    |
+ *                  +-------+-------+
+ *                          |
+ *                          |
+ *                  +---------------+
+ *                  |  pll_out_div  |
+ *                  |  DIV(1,2,4,8) |
+ *                  +-------+-------+
+ *                          |
+ *                          +-----------------------------+--------+
+ *                          |                             |        |
+ *                  +-------v-------+                     |        |
+ *                  |  bitclk_src   |                     |        |
+ *                  |  DIV(1..15)   |                     |        |
+ *                  +-------+-------+                     |        |
+ *                          |                             |        |
+ *                          +----------+---------+        |        |
+ *   Shadow Path            |          |         |        |        |
+ *       +          +-------v-------+  |  +------v------+ | +------v-------+
+ *       |          |  byteclk_src  |  |  |post_bit_div | | |post_vco_div  |
+ *       |          |  DIV(8)       |  |  |DIV (2)      | | |DIV(4)        |
+ *       |          +-------+-------+  |  +------+------+ | +------+-------+
+ *       |                  |          |         |      | |        |
+ *       |                  |          |         +------+ |        |
+ *       |                  |          +-------------+  | |   +----+
+ *       |         +--------+                        |  | |   |
+ *       |         |                               +-v--v-v---v------+
+ *     +-v---------v----+                           \  pclk_src_mux /
+ *     \  byteclk_mux /                              \             /
+ *      \            /                                +-----+-----+
+ *       +----+-----+                                       |        Shadow Path
+ *            |                                             |             +
+ *            v                                       +-----v------+      |
+ *       dsi_byte_clk                                 |  pclk_src  |      |
+ *                                                    | DIV(1..15) |      |
+ *                                                    +-----+------+      |
+ *                                                          |             |
+ *                                                          |             |
+ *                                                          +--------+    |
+ *                                                                   |    |
+ *                                                               +---v----v----+
+ *                                                                \  pclk_mux /
+ *                                                                 \         /
+ *                                                                  +---+---+
+ *                                                                      |
+ *                                                                      |
+ *                                                                      v
+ *                                                                   dsi_pclk
+ *
+ */
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pll_out_div",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pll_out_div",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_post_vco_div",
+		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_post_vco_div",
+		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_post_bit_div",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_post_bit_div",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_byteclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_byteclk",
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_byteclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1_phy_pll_out_byteclk",
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi0pll_bitclk_src",
+					"dsi0pll_post_bit_div",
+					"dsi0pll_pll_out_div",
+					"dsi0pll_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi1pll_bitclk_src",
+					"dsi1pll_post_bit_div",
+					"dsi1pll_pll_out_div",
+					"dsi1pll_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_dsiclk",
+			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1_phy_pll_out_dsiclk",
+			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
+	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+	[PLL_OUT_DIV_0_CLK] = &dsi0pll_pll_out_div.clkr.hw,
+	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
+	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.hw,
+	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.hw,
+	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
+	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
+	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
+	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.hw,
+	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.hw,
+	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
+	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+};
+
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
+				  struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx, i;
+	struct clk *clk;
+	struct clk_onecell_data *clk_data;
+	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_10nm);
+	struct regmap *rmap;
+
+	ndx = pll_res->index;
+
+	if (ndx >= DSI_PLL_MAX) {
+		pr_err("pll index(%d) NOT supported\n", ndx);
+		return -EINVAL;
+	}
+
+	pll_rsc_db[ndx] = pll_res;
+	plls[ndx].rsc = pll_res;
+	pll_res->priv = &plls[ndx];
+	pll_res->vco_delay = VCO_DELAY_USEC;
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+				sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	/* Establish client data */
+	if (ndx == 0) {
+		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pll_out_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_byteclk_mux.clkr.regmap = rmap;
+
+		dsi0pll_vco_clk.priv = pll_res;
+		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+							pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+
+	} else {
+		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pll_out_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_byteclk_mux.clkr.regmap = rmap;
+		dsi1pll_vco_clk.priv = pll_res;
+
+		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+						pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d, clocks successfully\n",
+				ndx);
+
+		return rc;
+	}
+clk_register_fail:
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
new file mode 100644
index 0000000..2b62bc5
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c
@@ -0,0 +1,1148 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-14nm.h"
+
+#define DSI_PLL_POLL_MAX_READS                  15
+#define DSI_PLL_POLL_TIMEOUT_US                 1000
+#define MSM8996_DSI_PLL_REVISION_2		2
+
+#define VCO_REF_CLK_RATE 19200000
+
+#define CEIL(x, y)		(((x) + ((y)-1)) / (y))
+
+static int mdss_pll_read_stored_trim_codes(
+		struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
+{
+	int i;
+	int rc = 0;
+	bool found = false;
+
+	if (!dsi_pll_res->dfps) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	for (i = 0; i < dsi_pll_res->dfps->panel_dfps.frame_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&dsi_pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d frame_rate=%d, vco_rate=%d, code %d %d\n",
+			codes_info->is_valid, codes_info->frame_rate,
+			codes_info->clk_rate, codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2);
+
+		if (vco_clk_rate != codes_info->clk_rate &&
+				codes_info->is_valid)
+			continue;
+
+		dsi_pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		dsi_pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		found = true;
+		break;
+	}
+
+	if (!found) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
+			dsi_pll_res->cache_pll_trim_codes[0],
+			dsi_pll_res->cache_pll_trim_codes[1]);
+
+end_read:
+	return rc;
+}
+
+int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div)
+{
+	struct mdss_pll_resources *pll = context;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	int rc;
+	u32 n1div = 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* in common clock framework the divider value provided is one less */
+	div++;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/*
+	 * vco rate = bit_clk * postdiv * n1div
+	 * vco range is 1300 MHz to 2600 MHz
+	 * postdiv = 1
+	 * n1div = 1 to 15
+	 * n1div = roundup(1300 MHz / bit_clk)
+	 * supports bit_clk above 86.67 MHz
+	 */
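+	/* e.g. a hypothetical 500 MHz bit clock needs n1div = CEIL(1300, 500) = 3 */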
+
+	pout->pll_n1div  = div;
+
+	n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n1div &= ~0xf;
+	n1div |= (div & 0xf);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
+	/* ensure n1 divider is programmed */
+	wmb();
+	pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
+			pll->index, div, pout->pll_postdiv, pout->pll_n1div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return 0;
+}
+
+int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = context;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/*
+	 * postdiv = 1/2/4/8
+	 * n1div = 1 - 15
+	 * for the time being, assume postdiv = 1
+	 */
+
+	*div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	*div &= 0xF;
+
+	/*
+	 * initialize n1div here, it will get updated when
+	 * corresponding set_div is called.
+	 */
+	pout->pll_n1div = *div;
+
+	/* common clock framework will add one to the divider value sent */
+	if (*div == 0)
+		*div = 1; /* value of zero means div is 2 as per SWI */
+	else
+		*div -= 1;
+
+	pr_debug("post n1 get div = %d\n", *div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+int n2_div_set_div(void *context, unsigned int reg, unsigned int div)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = context;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	struct mdss_pll_resources *slave;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/*
+	 * in common clock framework the actual divider value
+	 * provided is one less.
+	 */
+	div++;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/* this is for pixel clock */
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div &= ~0xf0;	/* bits 4 to 7 */
+	n2div |= (div << 4);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		MDSS_PLL_REG_W(slave->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	pout->pll_n2div = div;
+
+	/* set dsiclk_sel=1 so that n2div *= 2 */
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG1, 1);
+	pr_debug("ndx=%d div=%d n2div=%x\n", pll->index, div, n2div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div)
+{
+	struct mdss_pll_resources *pll = context;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	u32 data;
+
+	pdb = pll->priv;
+	pout = &pdb->out;
+
+	/*
+	 * in common clock framework the actual divider value
+	 * provided is one less.
+	 */
+	div++;
+
+	pout->pll_n2div = div;
+
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_CLK_CFG1,
+			data, 1);
+	return 0;
+}
+
+int n2_div_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = context;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d resources\n",
+						pll->index);
+		return rc;
+	}
+
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div >>= 4;
+	n2div &= 0x0f;
+	/*
+	 * initialize n2div here, it will get updated when
+	 * corresponding set_div is called.
+	 */
+	pout->pll_n2div = n2div;
+	mdss_pll_resource_enable(pll, false);
+
+	*div = n2div;
+
+	/* common clock framework will add one to the divider value sent */
+	if (*div == 0)
+		*div = 1; /* value of zero means div is 2 as per SWI */
+	else
+		*div -= 1;
+
+	pr_debug("ndx=%d div=%d\n", pll->index, *div);
+
+	return rc;
+}
+
+static bool pll_is_pll_locked_14nm(struct mdss_pll_resources *pll)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
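+	/*
+	 * Note: readl_poll_timeout_atomic() takes (delay_us, timeout_us), so
+	 * DSI_PLL_POLL_MAX_READS is used as the inter-read delay in us and
+	 * DSI_PLL_POLL_TIMEOUT_US as the total timeout.
+	 */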
+	if (readl_poll_timeout_atomic((pll->pll_base +
+			DSIPHY_PLL_RESET_SM_READY_STATUS),
+			status,
+			((status & BIT(5)) > 0),
+			DSI_PLL_POLL_MAX_READS,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+				pll->index, status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((pll->pll_base +
+				DSIPHY_PLL_RESET_SM_READY_STATUS),
+				status,
+				((status & BIT(0)) > 0),
+				DSI_PLL_POLL_MAX_READS,
+				DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x PLl not ready\n",
+				pll->index, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static void dsi_pll_start_14nm(void __iomem *pll_base)
+{
+	pr_debug("start PLL at base=%pK\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VREF_CFG1, 0x10);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
+}
+
+static void dsi_pll_stop_14nm(void __iomem *pll_base)
+{
+	pr_debug("stop PLL at base=%pK\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+}
+
+int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll)
+{
+	int rc = 0;
+
+	if (!pll) {
+		pr_err("Invalid PLL resources\n");
+		return -EINVAL;
+	}
+
+	dsi_pll_start_14nm(pll->pll_base);
+
+	/*
+	 * both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
+	 * enabled at mdss_dsi_14nm_phy_config()
+	 */
+
+	if (!pll_is_pll_locked_14nm(pll)) {
+		pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
+		rc = -EINVAL;
+		goto init_lock_err;
+	}
+
+	pr_debug("DSI PLL ndx=%d Lock success\n", pll->index);
+
+init_lock_err:
+	return rc;
+}
+
+static int dsi_pll_enable(struct clk_hw *hw)
+{
+	int i, rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](pll);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+
+	if (rc)
+		pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
+	else
+		pll->pll_on = true;
+
+	return rc;
+}
+
+static void dsi_pll_disable(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct mdss_pll_resources *slave;
+
+	if (!pll->pll_on &&
+		mdss_pll_resource_enable(pll, true)) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return;
+	}
+
+	pll->handoff_resources = false;
+	slave = pll->slave;
+
+	dsi_pll_stop_14nm(pll->pll_base);
+
+	mdss_pll_resource_enable(pll, false);
+
+	pll->pll_on = false;
+
+	pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
+}
+
+static void mdss_dsi_pll_14nm_input_init(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	pdb->in.fref = 19200000;	/* 19.2 MHz */
+	pdb->in.fdata = 0;		/* bit clock rate */
+	pdb->in.dsiclk_sel = 1;		/* 1, reg: 0x0014 */
+	pdb->in.ssc_en = pll->ssc_en;		/* 1, reg: 0x0494, bit 0 */
+	pdb->in.ldo_en = 0;		/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed  input */
+	pdb->in.refclk_dbler_en = 0;	/* 0, reg: 0x04c0, bit 1 */
+	pdb->in.vco_measure_time = 5;	/* 5, unknown */
+	pdb->in.kvco_measure_time = 5;	/* 5, unknown */
+	pdb->in.bandgap_timer = 4;	/* 4, reg: 0x0430, bit 3 - 5 */
+	pdb->in.pll_wakeup_timer = 5;	/* 5, reg: 0x043c, bit 0 - 2 */
+	pdb->in.plllock_cnt = 1;	/* 1, reg: 0x0488, bit 1 - 2 */
+	pdb->in.plllock_rng = 0;	/* 0, reg: 0x0488, bit 3 - 4 */
+	pdb->in.ssc_center = pll->ssc_center;/* 0, reg: 0x0494, bit 1 */
+	pdb->in.ssc_adj_period = 37;	/* 37, reg: 0x498, bit 0 - 9 */
+	pdb->in.ssc_spread = pll->ssc_ppm / 1000;
+	pdb->in.ssc_freq = pll->ssc_freq;
+
+	pdb->in.pll_ie_trim = 4;	/* 4, reg: 0x0400 */
+	pdb->in.pll_ip_trim = 4;	/* 4, reg: 0x0404 */
+	pdb->in.pll_cpcset_cur = 1;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	pdb->in.pll_cpmset_cur = 1;	/* 1, reg: 0x04f0, bit 3 - 5 */
+	pdb->in.pll_icpmset = 4;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	pdb->in.pll_icpcset = 4;	/* 4, reg: 0x04fc, bit 0 - 2 */
+	pdb->in.pll_icpmset_p = 0;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	pdb->in.pll_icpmset_m = 0;	/* 0, reg: 0x04f4, bit 3 - 5 */
+	pdb->in.pll_icpcset_p = 0;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	pdb->in.pll_icpcset_m = 0;	/* 0, reg: 0x04f8, bit 3 - 5 */
+	pdb->in.pll_lpf_res1 = 3;	/* 3, reg: 0x0504, bit 0 - 3 */
+	pdb->in.pll_lpf_cap1 = 11;	/* 11, reg: 0x0500, bit 0 - 3 */
+	pdb->in.pll_lpf_cap2 = 1;	/* 1, reg: 0x0500, bit 4 - 7 */
+	pdb->in.pll_iptat_trim = 7;
+	pdb->in.pll_c3ctrl = 2;		/* 2 */
+	pdb->in.pll_r3ctrl = 1;		/* 1 */
+	pdb->out.pll_postdiv = 1;
+}
+
+static void pll_14nm_ssc_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	u32 period, ssc_period;
+	u32 ref, rem;
+	s64 step_size;
+
+	pr_debug("%s: vco=%lld ref=%lld\n", __func__,
+		pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+	ssc_period = pdb->in.ssc_freq / 500;
+	period = (unsigned long)pll->vco_ref_clk_rate / 1000;
+	ssc_period  = CEIL(period, ssc_period);
+	ssc_period -= 1;
+	pdb->out.ssc_period = ssc_period;
+
+	pr_debug("%s: ssc, freq=%d spread=%d period=%d\n", __func__,
+	pdb->in.ssc_freq, pdb->in.ssc_spread, pdb->out.ssc_period);
+
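+	/*
+	 * step_size = (vco / fref) * 2^20 * (ssc_ppm / 10^6) *
+	 *	       (ssc_adj_period + 1) / (ssc_period + 1), rounded up;
+	 * i.e. the per-step frequency offset in the same 20-bit fixed-point
+	 * units as div_frac_start. Only the lower 16 bits are programmed.
+	 */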
+	step_size = (u32)pll->vco_current_rate;
+	ref = pll->vco_ref_clk_rate;
+	ref /= 1000;
+	step_size = div_s64(step_size, ref);
+	step_size <<= 20;
+	step_size = div_s64(step_size, 1000);
+	step_size *= pdb->in.ssc_spread;
+	step_size = div_s64(step_size, 1000);
+	step_size *= (pdb->in.ssc_adj_period + 1);
+
+	rem = 0;
+	step_size = div_s64_rem(step_size, ssc_period + 1, &rem);
+	if (rem)
+		step_size++;
+
+	pr_debug("%s: step_size=%lld\n", __func__, step_size);
+
+	step_size &= 0x0ffff;	/* take lower 16 bits */
+
+	pdb->out.ssc_step_size = step_size;
+}
+
+static void pll_14nm_dec_frac_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	u64 multiplier = BIT(20);
+	u64 dec_start_multiple, dec_start, pll_comp_val;
+	s32 duration, div_frac_start;
+	s64 vco_clk_rate = pll->vco_current_rate;
+	s64 fref = pll->vco_ref_clk_rate;
+
+	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n",
+				vco_clk_rate, fref);
+
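+	/*
+	 * Worked example (illustrative): vco = 1.3 GHz, fref = 19.2 MHz:
+	 * N = 1300000000 / 19200000 = 67.7083..., so
+	 * dec_start_multiple = N * 2^20 = 70997333,
+	 * dec_start = 67 and div_frac_start = 742741 (0xb5555).
+	 */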
+	dec_start_multiple = div_s64(vco_clk_rate * multiplier, fref);
+	div_s64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+	dec_start = div_s64(dec_start_multiple, multiplier);
+
+	pout->dec_start = (u32)dec_start;
+	pout->div_frac_start = div_frac_start;
+
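+	/*
+	 * plllock_cnt selects the lock-detect window: 1024, 256, 128 or 32
+	 * reference cycles. The comparison value programmed below works out
+	 * to duration * (vco / fref) / 10.
+	 */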
+	if (pin->plllock_cnt == 0)
+		duration = 1024;
+	else if (pin->plllock_cnt == 1)
+		duration = 256;
+	else if (pin->plllock_cnt == 2)
+		duration = 128;
+	else
+		duration = 32;
+
+	pll_comp_val = duration * dec_start_multiple;
+	pll_comp_val = div_u64(pll_comp_val, multiplier);
+	do_div(pll_comp_val, 10);
+
+	pout->plllock_cmp = (u32)pll_comp_val;
+
+	pout->pll_txclk_en = 1;
+	if (pll->revision == MSM8996_DSI_PLL_REVISION_2)
+		pout->cmn_ldo_cntrl = 0x3c;
+	else
+		pout->cmn_ldo_cntrl = 0x1c;
+}
+
+static u32 pll_14nm_kvco_slop(u32 vrate)
+{
+	u32 slop = 0;
+
+	if (vrate > 1300000000UL && vrate <= 1800000000UL)
+		slop =  600;
+	else if (vrate > 1800000000UL && vrate < 2300000000UL)
+		slop = 400;
+	else if (vrate > 2300000000UL && vrate < 2600000000UL)
+		slop = 280;
+
+	return slop;
+}
+
+static void pll_14nm_calc_vco_count(struct dsi_pll_db *pdb,
+			 s64 vco_clk_rate, s64 fref)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	u64 data;
+	u32 cnt;
+
+	data = fref * pin->vco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 2;
+	pout->pll_vco_div_ref = data;
+
+	data = (unsigned long)vco_clk_rate / 1000000;	/* unit is Mhz */
+	data *= pin->vco_measure_time;
+	do_div(data, 10);
+	pout->pll_vco_count = data; /* reg: 0x0474, 0x0478 */
+
+	data = fref * pin->kvco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 1;
+	pout->pll_kvco_div_ref = data;
+
+	cnt = pll_14nm_kvco_slop(vco_clk_rate);
+	cnt *= 2;
+	cnt /= 100;
+	cnt *= pin->kvco_measure_time;
+	pout->pll_kvco_count = cnt;
+
+	pout->pll_misc1 = 16;
+	pout->pll_resetsm_cntrl = 48;
+	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+	pout->pll_kvco_code = 0;
+}
+
+static void pll_db_commit_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pin->ssc_adj_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER1, data);
+	data = (pin->ssc_adj_period >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER2, data);
+
+	data = pout->ssc_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER1, data);
+	data = (pout->ssc_period >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER2, data);
+
+	data = pout->ssc_step_size;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE1, data);
+	data = (pout->ssc_step_size >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE2, data);
+
+	data = (pin->ssc_center & 0x01);
+	data <<= 1;
+	data |= 0x01; /* enable */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_EN_CENTER, data);
+
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_common(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	/* configure the non-frequency-dependent pll registers */
+	data = 0;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SYSCLK_EN_RESET, data);
+
+	/* DSIPHY_PLL_CLKBUFLR_EN updated at dsi phy */
+
+	data = pout->pll_txclk_en;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_TXCLK_EN, data);
+
+	data = pout->pll_resetsm_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
+	data = pout->pll_resetsm_cntrl2;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL2, data);
+	data = pout->pll_resetsm_cntrl5;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL5, data);
+
+	data = pout->pll_vco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF1, data);
+	data = (pout->pll_vco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF2, data);
+
+	data = pout->pll_kvco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF1, data);
+	data = (pout->pll_kvco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF2, data);
+
+	data = pout->pll_misc1;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_MISC1, data);
+
+	data = pin->pll_ie_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IE_TRIM, data);
+
+	data = pin->pll_ip_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IP_TRIM, data);
+
+	data = ((pin->pll_cpmset_cur << 3) | pin->pll_cpcset_cur);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CP_SET_CUR, data);
+
+	data = ((pin->pll_icpcset_p << 3) | pin->pll_icpcset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPCSET, data);
+
+	data = ((pin->pll_icpmset_p << 3) | pin->pll_icpmset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPMSET, data);
+
+	data = ((pin->pll_icpmset << 3) | pin->pll_icpcset);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICP_SET, data);
+
+	data = ((pdb->in.pll_lpf_cap2 << 4) | pdb->in.pll_lpf_cap1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF1, data);
+
+	data = pin->pll_iptat_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IPTAT_TRIM, data);
+
+	data = (pdb->in.pll_c3ctrl | (pdb->in.pll_r3ctrl << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_db_commit_14nm(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pout->cmn_ldo_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
+
+	pll_db_commit_common(pll, pdb);
+
+	/* de-assert pll start and apply pll sw reset */
+	/* stop pll */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+
+	/* pll sw reset */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0x20);
+	wmb();	/* make sure register committed */
+	udelay(10);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
+	wmb();	/* make sure register committed */
+
+	data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1  */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
+
+	data = 0xff; /* data, clk, pll normal operation */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
+
+	/* configure the frequency-dependent pll registers */
+	data = pout->dec_start;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DEC_START, data);
+
+	data = pout->div_frac_start;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START1, data);
+	data = (pout->div_frac_start >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START2, data);
+	data = (pout->div_frac_start >> 16);
+	data &= 0x0f;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START3, data);
+
+	data = pout->plllock_cmp;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP1, data);
+	data = (pout->plllock_cmp >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP2, data);
+	data = (pout->plllock_cmp >> 16);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP3, data);
+
+	data = ((pin->plllock_cnt << 1) | (pin->plllock_rng << 3));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP_EN, data);
+
+	data = pout->pll_vco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT1, data);
+	data = (pout->pll_vco_count >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT2, data);
+
+	data = pout->pll_kvco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT1, data);
+	data = (pout->pll_kvco_count >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT2, data);
+
+	/*
+	 * tx_band = pll_postdiv
+	 * 0: divided by 1 <== for now
+	 * 1: divided by 2
+	 * 2: divided by 4
+	 * 3: divided by 8
+	 */
+	data = (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF2_POSTDIV, data);
+
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG0, data);
+
+	if (pll->ssc_en)
+		pll_db_commit_ssc(pll, pdb);
+
+	wmb();	/* make sure register committed */
+}
+
+/*
+ * pll_source_finding:
+ * Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
+ * at mdss_dsi_14nm_phy_config()
+ */
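+/*
+ * Decoding of the checks below:
+ *   source-from-left  + output to both sides -> this PLL is the master
+ *   source-from-right + no output            -> this PLL is the slave
+ *   source-from-left  + output to right only -> standalone
+ * Any other combination is treated as a setup error.
+ */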
+static int pll_source_finding(struct mdss_pll_resources *pll)
+{
+	u32 clk_buf_en;
+	u32 glbl_test_ctrl;
+
+	glbl_test_ctrl = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_CMN_GLBL_TEST_CTRL);
+	clk_buf_en = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_PLL_CLKBUFLR_EN);
+
+	glbl_test_ctrl &= BIT(2);
+	glbl_test_ctrl >>= 2;
+
+	pr_debug("%s: pll=%d clk_buf_en=%x glbl_test_ctrl=%x\n",
+		__func__, pll->index, clk_buf_en, glbl_test_ctrl);
+
+	clk_buf_en &= (PLL_OUTPUT_RIGHT | PLL_OUTPUT_LEFT);
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_BOTH))
+		return PLL_MASTER;
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_RIGHT) &&
+			(clk_buf_en == PLL_OUTPUT_NONE))
+		return PLL_SLAVE;
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_RIGHT))
+		return PLL_STANDALONE;
+
+	pr_debug("%s: Error pll setup, clk_buf_en=%x glbl_test_ctrl=%x\n",
+			__func__, clk_buf_en, glbl_test_ctrl);
+
+	return PLL_UNKNOWN;
+}
+
+static void pll_source_setup(struct mdss_pll_resources *pll)
+{
+	int status;
+	struct dsi_pll_db *pdb = (struct dsi_pll_db *)pll->priv;
+	struct mdss_pll_resources *other;
+
+	if (pdb->source_setup_done)
+		return;
+
+	pdb->source_setup_done++;
+
+	status = pll_source_finding(pll);
+
+	if (status == PLL_STANDALONE || status == PLL_UNKNOWN)
+		return;
+
+	other = pdb->next->pll;
+	if (!other)
+		return;
+
+	pr_debug("%s: status=%d pll=%d other=%d\n", __func__,
+			status, pll->index, other->index);
+
+	if (status == PLL_MASTER)
+		pll->slave = other;
+	else
+		other->slave = pll;
+}
+
+unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+	u64 vco_rate, multiplier = BIT(20);
+	s32 div_frac_start;
+	u32 dec_start;
+	u64 ref_clk = vco->ref_clk_rate;
+	int rc;
+
+	if (pll->vco_current_rate)
+		return (unsigned long)pll->vco_current_rate;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return rc;
+	}
+
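+	/*
+	 * Reverse of pll_14nm_dec_frac_calc():
+	 * vco = fref * (dec_start + div_frac_start / 2^20)
+	 */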
+	dec_start = MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DEC_START);
+	dec_start &= 0x0ff;
+	pr_debug("dec_start = 0x%x\n", dec_start);
+
+	div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
+	div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
+	div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
+	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
+
+	vco_rate = ref_clk * dec_start;
+	vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(pll, false);
+
+	pr_debug("%s: returning vco rate as %lu\n",
+			__func__, (unsigned long)vco_rate);
+	return (unsigned long)vco_rate;
+}
+
+int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct mdss_pll_resources *slave;
+	struct dsi_pll_db *pdb;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	if (!pdb) {
+		pr_err("No prov found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	pll_source_setup(pll);
+
+	pr_debug("%s: ndx=%d base=%pK rate=%lu slave=%pK\n", __func__,
+				pll->index, pll->pll_base, rate, pll->slave);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_14nm_input_init(pll, pdb);
+
+	pll_14nm_dec_frac_calc(pll, pdb);
+
+	if (pll->ssc_en)
+		pll_14nm_ssc_calc(pll, pdb);
+
+	pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
+					pll->vco_ref_clk_rate);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		pll_db_commit_14nm(slave, pdb);
+
+	/* commit master itself */
+	pll_db_commit_14nm(pll, pdb);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static void shadow_pll_dynamic_refresh_14nm(struct mdss_pll_resources *pll,
+							struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_output *pout = &pdb->out;
+
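+	/*
+	 * Each DYNAMIC_REFRESH_PLL_CTRLn slot queues two register/value
+	 * pairs that the dynamic refresh hardware replays when a seamless
+	 * fps change is triggered, re-programming the PLL with the cached
+	 * trim codes instead of running a full calibration.
+	 */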
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+		DSIPHY_CMN_CTRL_0, DSIPHY_PLL_SYSCLK_EN_RESET,
+		0xFF, 0x0);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+		DSIPHY_PLL_DEC_START, DSIPHY_PLL_DIV_FRAC_START1,
+		pout->dec_start, (pout->div_frac_start & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+		DSIPHY_PLL_DIV_FRAC_START2, DSIPHY_PLL_DIV_FRAC_START3,
+		((pout->div_frac_start >> 8) & 0x0FF),
+		((pout->div_frac_start >> 16) & 0x0F));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+		DSIPHY_PLL_PLLLOCK_CMP1, DSIPHY_PLL_PLLLOCK_CMP2,
+		(pout->plllock_cmp & 0x0FF),
+		((pout->plllock_cmp >> 8) & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+		DSIPHY_PLL_PLLLOCK_CMP3, DSIPHY_PLL_PLL_VCO_TUNE,
+		((pout->plllock_cmp >> 16) & 0x03),
+		(pll->cache_pll_trim_codes[1] | BIT(7))); /* VCO tune */
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+		DSIPHY_PLL_KVCO_CODE, DSIPHY_PLL_RESETSM_CNTRL,
+		(pll->cache_pll_trim_codes[0] | BIT(5)), 0x38);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+		DSIPHY_PLL_PLL_LPF2_POSTDIV, DSIPHY_CMN_PLL_CNTRL,
+		(((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1), 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0000001E);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x001FFE00);
+
+	/*
+	 * Ensure all the dynamic refresh registers are written before
+	 * dynamic refresh to change the fps is triggered
+	 */
+	wmb();
+}
+
+int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct dsi_pll_db *pdb;
+	s64 vco_clk_rate = (s64)rate;
+
+	if (!pll) {
+		pr_err("PLL data not found\n");
+		return -EINVAL;
+	}
+
+	pdb = pll->priv;
+	if (!pdb) {
+		pr_err("No priv data found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_read_stored_trim_codes(pll, vco_clk_rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%lld\n", vco_clk_rate);
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	pr_debug("%s: ndx=%d base=%pK rate=%lu\n", __func__,
+			pll->index, pll->pll_base, rate);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_14nm_input_init(pll, pdb);
+
+	pll_14nm_dec_frac_calc(pll, pdb);
+
+	pll_14nm_calc_vco_count(pdb, pll->vco_current_rate,
+			pll->vco_ref_clk_rate);
+
+	shadow_pll_dynamic_refresh_14nm(pll, pdb);
+
+	rc = mdss_pll_resource_enable(pll, false);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	return rc;
+}
+
+long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
+						unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	u32 div;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+	div = vco->min_rate / rate;
+	if (div > 15) {
+		/* rate < 86.67 MHz */
+		pr_err("rate=%lu not supported\n", rate);
+		return -EINVAL;
+	}
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	*parent_rate = rrate;
+	return rrate;
+}
+
+int pll_vco_prepare_14nm(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
+							pll->index);
+		return rc;
+	}
+
+	if ((pll->vco_cached_rate != 0)
+	    && (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+						pll->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+					rc, pll->index);
+			mdss_pll_resource_enable(pll, false);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(hw);
+
+	if (rc) {
+		mdss_pll_resource_enable(pll, false);
+		pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
+	}
+
+error:
+	return rc;
+}
+
+void pll_vco_unprepare_14nm(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return;
+	}
+
+	pll->vco_cached_rate = clk_hw_get_rate(hw);
+	dsi_pll_disable(hw);
+}
+
+int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val)
+{
+	return 0;
+}
+
+int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val)
+{
+	*val = 0;
+	return 0;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c
new file mode 100644
index 0000000..739e428
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-14nm.h"
+#include <dt-bindings/clock/mdss-14nm-pll-clk.h>
+
+#define VCO_DELAY_USEC		1
+
+static struct dsi_pll_db pll_db[DSI_PLL_NUM];
+
+static struct regmap_config dsi_pll_14nm_config = {
+	.reg_bits	= 32,
+	.reg_stride	= 4,
+	.val_bits	= 32,
+	.max_register = 0x588,
+};
+
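+/*
+ * The divider and mux "registers" are not behind a real bus; these
+ * regmap_bus stubs route clk_regmap reads/writes to the post_n1/n2 div
+ * and mux handlers, which program the DSI PHY registers directly.
+ */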
+static struct regmap_bus post_n1_div_regmap_bus = {
+	.reg_write = post_n1_div_set_div,
+	.reg_read = post_n1_div_get_div,
+};
+
+static struct regmap_bus n2_div_regmap_bus = {
+	.reg_write = n2_div_set_div,
+	.reg_read = n2_div_get_div,
+};
+
+static struct regmap_bus shadow_n2_div_regmap_bus = {
+	.reg_write = shadow_n2_div_set_div,
+	.reg_read = n2_div_get_div,
+};
+
+static struct regmap_bus dsi_mux_regmap_bus = {
+	.reg_write = dsi_mux_set_parent_14nm,
+	.reg_read = dsi_mux_get_parent_14nm,
+};
+
+/* Op structures */
+static const struct clk_ops clk_ops_dsi_vco = {
+	.recalc_rate = pll_vco_recalc_rate_14nm,
+	.set_rate = pll_vco_set_rate_14nm,
+	.round_rate = pll_vco_round_rate_14nm,
+	.prepare = pll_vco_prepare_14nm,
+	.unprepare = pll_vco_unprepare_14nm,
+};
+
+/* Shadow ops for dynamic refresh */
+static const struct clk_ops clk_ops_shadow_dsi_vco = {
+	.recalc_rate = pll_vco_recalc_rate_14nm,
+	.set_rate = shadow_pll_vco_set_rate_14nm,
+	.round_rate = pll_vco_round_rate_14nm,
+};
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_vco_clk_14nm",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_ops_dsi_vco,
+		},
+};
+
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_vco_clk_14nm",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_dsi_vco,
+		},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_vco_clk_14nm",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_ops_dsi_vco,
+		},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_14nm,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_vco_clk_14nm",
+			.parent_names = (const char *[]){ "bi_tcxo" },
+			.num_parents = 1,
+			.ops = &clk_ops_shadow_dsi_vco,
+		},
+};
+
+static struct clk_regmap_div dsi0pll_post_n1_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_post_n1_div_clk",
+			.parent_names =
+				(const char *[]){ "dsi0pll_vco_clk_14nm" },
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_shadow_post_n1_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_post_n1_div_clk",
+			.parent_names =
+				(const char *[]){"dsi0pll_shadow_vco_clk_14nm"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_post_n1_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_post_n1_div_clk",
+			.parent_names =
+				(const char *[]){ "dsi1pll_vco_clk_14nm" },
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_shadow_post_n1_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_post_n1_div_clk",
+			.parent_names =
+				(const char *[]){"dsi1pll_shadow_vco_clk_14nm"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_n2_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_n2_div_clk",
+			.parent_names =
+				(const char *[]){ "dsi0pll_post_n1_div_clk" },
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_shadow_n2_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_shadow_n2_div_clk",
+			.parent_names =
+			(const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_n2_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_n2_div_clk",
+			.parent_names =
+				(const char *[]){ "dsi1pll_post_n1_div_clk" },
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_shadow_n2_div_clk = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_shadow_n2_div_clk",
+			.parent_names =
+			(const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
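+/*
+ * Fixed 1/2 factor on the pixel path; this pairs with dsiclk_sel = 1
+ * programmed in n2_div_set_div(), which doubles the effective n2 divider.
+ */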
+static struct clk_fixed_factor dsi0pll_pixel_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_pixel_clk_src",
+		.parent_names = (const char *[]){ "dsi0pll_n2_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_pixel_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_pixel_clk_src",
+		.parent_names = (const char *[]){ "dsi0pll_shadow_n2_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_pixel_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_pixel_clk_src",
+		.parent_names = (const char *[]){ "dsi1pll_n2_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_pixel_clk_src = {
+	.div = 2,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_pixel_clk_src",
+		.parent_names = (const char *[]){ "dsi1pll_shadow_n2_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pixel_clk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 1,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_dsiclk",
+			.parent_names =
+				(const char *[]){ "dsi0pll_pixel_clk_src",
+					"dsi0pll_shadow_pixel_clk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pixel_clk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 1,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pixel_clk_mux",
+			.parent_names =
+				(const char *[]){ "dsi1pll_pixel_clk_src",
+					"dsi1pll_shadow_pixel_clk_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
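+/* DSI byte clock is the bit clock (post-N1 output) divided by 8 */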
+static struct clk_fixed_factor dsi0pll_byte_clk_src = {
+	.div = 8,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_byte_clk_src",
+		.parent_names = (const char *[]){ "dsi0pll_post_n1_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_shadow_byte_clk_src = {
+	.div = 8,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_shadow_byte_clk_src",
+		.parent_names =
+			(const char *[]){ "dsi0pll_shadow_post_n1_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_byte_clk_src = {
+	.div = 8,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_byte_clk_src",
+		.parent_names = (const char *[]){ "dsi1pll_post_n1_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_shadow_byte_clk_src = {
+	.div = 8,
+	.mult = 1,
+
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_shadow_byte_clk_src",
+		.parent_names =
+			(const char *[]){ "dsi1pll_shadow_post_n1_div_clk" },
+		.num_parents = 1,
+		.flags = (CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_byte_clk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 1,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_byteclk",
+			.parent_names =
+				(const char *[]){"dsi0pll_byte_clk_src",
+					"dsi0pll_shadow_byte_clk_src"},
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_byte_clk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 1,
+
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_byte_clk_mux",
+			.parent_names =
+				(const char *[]){"dsi1pll_byte_clk_src",
+					"dsi1pll_shadow_byte_clk_src"},
+			.num_parents = 2,
+			.ops = &clk_regmap_mux_closest_ops,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		},
+	},
+};
+
+static struct clk_hw *mdss_dsi_pllcc_14nm[] = {
+	[BYTE0_MUX_CLK] = &dsi0pll_byte_clk_mux.clkr.hw,
+	[BYTE0_SRC_CLK] = &dsi0pll_byte_clk_src.hw,
+	[PIX0_MUX_CLK] = &dsi0pll_pixel_clk_mux.clkr.hw,
+	[PIX0_SRC_CLK] = &dsi0pll_pixel_clk_src.hw,
+	[N2_DIV_0_CLK] = &dsi0pll_n2_div_clk.clkr.hw,
+	[POST_N1_DIV_0_CLK] = &dsi0pll_post_n1_div_clk.clkr.hw,
+	[VCO_CLK_0_CLK] = &dsi0pll_vco_clk.hw,
+	[SHADOW_BYTE0_SRC_CLK] = &dsi0pll_shadow_byte_clk_src.hw,
+	[SHADOW_PIX0_SRC_CLK] = &dsi0pll_shadow_pixel_clk_src.hw,
+	[SHADOW_N2_DIV_0_CLK] = &dsi0pll_shadow_n2_div_clk.clkr.hw,
+	[SHADOW_POST_N1_DIV_0_CLK] = &dsi0pll_shadow_post_n1_div_clk.clkr.hw,
+	[SHADOW_VCO_CLK_0_CLK] = &dsi0pll_shadow_vco_clk.hw,
+	[BYTE1_MUX_CLK] = &dsi1pll_byte_clk_mux.clkr.hw,
+	[BYTE1_SRC_CLK] = &dsi1pll_byte_clk_src.hw,
+	[PIX1_MUX_CLK] = &dsi1pll_pixel_clk_mux.clkr.hw,
+	[PIX1_SRC_CLK] = &dsi1pll_pixel_clk_src.hw,
+	[N2_DIV_1_CLK] = &dsi1pll_n2_div_clk.clkr.hw,
+	[POST_N1_DIV_1_CLK] = &dsi1pll_post_n1_div_clk.clkr.hw,
+	[VCO_CLK_1_CLK] = &dsi1pll_vco_clk.hw,
+	[SHADOW_BYTE1_SRC_CLK] = &dsi1pll_shadow_byte_clk_src.hw,
+	[SHADOW_PIX1_SRC_CLK] = &dsi1pll_shadow_pixel_clk_src.hw,
+	[SHADOW_N2_DIV_1_CLK] = &dsi1pll_shadow_n2_div_clk.clkr.hw,
+	[SHADOW_POST_N1_DIV_1_CLK] = &dsi1pll_shadow_post_n1_div_clk.clkr.hw,
+	[SHADOW_VCO_CLK_1_CLK] = &dsi1pll_shadow_vco_clk.hw,
+};
+
+int dsi_pll_clock_register_14nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx, i;
+	int const ssc_freq_default = 31500; /* default h/w recommended value */
+	int const ssc_ppm_default = 5000; /* default h/w recommended value */
+	struct dsi_pll_db *pdb;
+	struct clk_onecell_data *clk_data;
+	struct clk *clk;
+	struct regmap *regmap;
+	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_14nm);
+
+	if (pll_res->index >= DSI_PLL_NUM) {
+		pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
+		return -EINVAL;
+	}
+
+	ndx = pll_res->index;
+	pdb = &pll_db[ndx];
+	pll_res->priv = pdb;
+	pdb->pll = pll_res;
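+	/*
+	 * Link the per-PLL entries in a ring so that pll_source_setup() can
+	 * reach the other PLL through pdb->next->pll.
+	 */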
+	ndx++;
+	ndx %= DSI_PLL_NUM;
+	pdb->next = &pll_db[ndx];
+
+	if (pll_res->ssc_en) {
+		if (!pll_res->ssc_freq)
+			pll_res->ssc_freq = ssc_freq_default;
+		if (!pll_res->ssc_ppm)
+			pll_res->ssc_ppm = ssc_ppm_default;
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+				sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	/* Set client data to mux, div and vco clocks.  */
+	if (pll_res->index == DSI_PLL_1) {
+		regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
+					pll_res, &dsi_pll_14nm_config);
+		dsi1pll_post_n1_div_clk.clkr.regmap = regmap;
+		dsi1pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
+
+		regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
+				pll_res, &dsi_pll_14nm_config);
+		dsi1pll_n2_div_clk.clkr.regmap = regmap;
+
+		regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
+				pll_res, &dsi_pll_14nm_config);
+		dsi1pll_shadow_n2_div_clk.clkr.regmap = regmap;
+
+		regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
+				pll_res, &dsi_pll_14nm_config);
+		dsi1pll_byte_clk_mux.clkr.regmap = regmap;
+		dsi1pll_pixel_clk_mux.clkr.regmap = regmap;
+
+		dsi1pll_vco_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
+
+		pll_res->vco_delay = VCO_DELAY_USEC;
+
+		for (i = BYTE1_MUX_CLK; i <= SHADOW_VCO_CLK_1_CLK; i++) {
+			pr_debug("register clk: %d index: %d\n",
+							i, pll_res->index);
+			clk = devm_clk_register(&pdev->dev,
+					mdss_dsi_pllcc_14nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI: %d\n",
+						pll_res->index);
+				rc = -EINVAL;
+				goto clk_reg_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	} else {
+		regmap = devm_regmap_init(&pdev->dev, &post_n1_div_regmap_bus,
+					pll_res, &dsi_pll_14nm_config);
+		dsi0pll_post_n1_div_clk.clkr.regmap = regmap;
+		dsi0pll_shadow_post_n1_div_clk.clkr.regmap = regmap;
+
+		regmap = devm_regmap_init(&pdev->dev, &n2_div_regmap_bus,
+				pll_res, &dsi_pll_14nm_config);
+		dsi0pll_n2_div_clk.clkr.regmap = regmap;
+
+		regmap = devm_regmap_init(&pdev->dev, &shadow_n2_div_regmap_bus,
+				pll_res, &dsi_pll_14nm_config);
+		dsi0pll_shadow_n2_div_clk.clkr.regmap = regmap;
+
+		regmap = devm_regmap_init(&pdev->dev, &dsi_mux_regmap_bus,
+				pll_res, &dsi_pll_14nm_config);
+		dsi0pll_byte_clk_mux.clkr.regmap = regmap;
+		dsi0pll_pixel_clk_mux.clkr.regmap = regmap;
+
+		dsi0pll_vco_clk.priv = pll_res;
+		dsi0pll_shadow_vco_clk.priv = pll_res;
+		pll_res->vco_delay = VCO_DELAY_USEC;
+
+		for (i = BYTE0_MUX_CLK; i <= SHADOW_VCO_CLK_0_CLK; i++) {
+			pr_debug("reg clk: %d index: %d\n", i, pll_res->index);
+			clk = devm_clk_register(&pdev->dev,
+					mdss_dsi_pllcc_14nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI: %d\n",
+						pll_res->index);
+				rc = -EINVAL;
+				goto clk_reg_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+						pll_res->index);
+		return rc;
+	}
+
+clk_reg_fail:
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h
new file mode 100644
index 0000000..e290687
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MDSS_DSI_PLL_14NM_H
+#define MDSS_DSI_PLL_14NM_H
+
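+/* CLK_CFG0: bits [3:0] hold the post-N1 divider, bits [7:4] the N2 divider */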
+#define DSIPHY_CMN_CLK_CFG0		0x0010
+#define DSIPHY_CMN_CLK_CFG1		0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL	0x0018
+
+#define DSIPHY_CMN_PLL_CNTRL		0x0048
+#define DSIPHY_CMN_CTRL_0		0x001c
+#define DSIPHY_CMN_CTRL_1		0x0020
+
+#define DSIPHY_CMN_LDO_CNTRL		0x004c
+
+#define DSIPHY_PLL_IE_TRIM		0x0400
+#define DSIPHY_PLL_IP_TRIM		0x0404
+
+#define DSIPHY_PLL_IPTAT_TRIM		0x0410
+
+#define DSIPHY_PLL_CLKBUFLR_EN		0x041c
+
+#define DSIPHY_PLL_SYSCLK_EN_RESET	0x0428
+#define DSIPHY_PLL_RESETSM_CNTRL	0x042c
+#define DSIPHY_PLL_RESETSM_CNTRL2	0x0430
+#define DSIPHY_PLL_RESETSM_CNTRL3	0x0434
+#define DSIPHY_PLL_RESETSM_CNTRL4	0x0438
+#define DSIPHY_PLL_RESETSM_CNTRL5	0x043c
+#define DSIPHY_PLL_KVCO_DIV_REF1	0x0440
+#define DSIPHY_PLL_KVCO_DIV_REF2	0x0444
+#define DSIPHY_PLL_KVCO_COUNT1		0x0448
+#define DSIPHY_PLL_KVCO_COUNT2		0x044c
+#define DSIPHY_PLL_VREF_CFG1		0x045c
+
+#define DSIPHY_PLL_KVCO_CODE		0x0458
+
+#define DSIPHY_PLL_VCO_DIV_REF1		0x046c
+#define DSIPHY_PLL_VCO_DIV_REF2		0x0470
+#define DSIPHY_PLL_VCO_COUNT1		0x0474
+#define DSIPHY_PLL_VCO_COUNT2		0x0478
+#define DSIPHY_PLL_PLLLOCK_CMP1		0x047c
+#define DSIPHY_PLL_PLLLOCK_CMP2		0x0480
+#define DSIPHY_PLL_PLLLOCK_CMP3		0x0484
+#define DSIPHY_PLL_PLLLOCK_CMP_EN	0x0488
+#define DSIPHY_PLL_PLL_VCO_TUNE		0x048C
+#define DSIPHY_PLL_DEC_START		0x0490
+#define DSIPHY_PLL_SSC_EN_CENTER	0x0494
+#define DSIPHY_PLL_SSC_ADJ_PER1		0x0498
+#define DSIPHY_PLL_SSC_ADJ_PER2		0x049c
+#define DSIPHY_PLL_SSC_PER1		0x04a0
+#define DSIPHY_PLL_SSC_PER2		0x04a4
+#define DSIPHY_PLL_SSC_STEP_SIZE1	0x04a8
+#define DSIPHY_PLL_SSC_STEP_SIZE2	0x04ac
+#define DSIPHY_PLL_DIV_FRAC_START1	0x04b4
+#define DSIPHY_PLL_DIV_FRAC_START2	0x04b8
+#define DSIPHY_PLL_DIV_FRAC_START3	0x04bc
+#define DSIPHY_PLL_TXCLK_EN		0x04c0
+#define DSIPHY_PLL_PLL_CRCTRL		0x04c4
+
+#define DSIPHY_PLL_RESET_SM_READY_STATUS 0x04cc
+
+#define DSIPHY_PLL_PLL_MISC1		0x04e8
+
+#define DSIPHY_PLL_CP_SET_CUR		0x04f0
+#define DSIPHY_PLL_PLL_ICPMSET		0x04f4
+#define DSIPHY_PLL_PLL_ICPCSET		0x04f8
+#define DSIPHY_PLL_PLL_ICP_SET		0x04fc
+#define DSIPHY_PLL_PLL_LPF1		0x0500
+#define DSIPHY_PLL_PLL_LPF2_POSTDIV	0x0504
+#define DSIPHY_PLL_PLL_BANDGAP	0x0508
+
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x050
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		0x060
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		0x064
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		0x068
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		0x06C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		0x070
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		0x074
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		0x078
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		0x07C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		0x080
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		0x084
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		0x088
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	0x094
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	0x098
+
+struct dsi_pll_input {
+	u32 fref;	/* 19.2 MHz, reference clk */
+	u32 fdata;	/* bit clock rate */
+	u32 dsiclk_sel; /* 1, reg: 0x0014 */
+	u32 n2div;	/* 1, reg: 0x0010, bit 4-7 */
+	u32 ssc_en;	/* 1, reg: 0x0494, bit 0 */
+	u32 ldo_en;	/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed  */
+	u32 refclk_dbler_en;	/* 0, reg: 0x04c0, bit 1 */
+	u32 vco_measure_time;	/* 5, unknown */
+	u32 kvco_measure_time;	/* 5, unknown */
+	u32 bandgap_timer;	/* 4, reg: 0x0430, bit 3 - 5 */
+	u32 pll_wakeup_timer;	/* 5, reg: 0x043c, bit 0 - 2 */
+	u32 plllock_cnt;	/* 1, reg: 0x0488, bit 1 - 2 */
+	u32 plllock_rng;	/* 1, reg: 0x0488, bit 3 - 4 */
+	u32 ssc_center;		/* 0, reg: 0x0494, bit 1 */
+	u32 ssc_adj_period;	/* 37, reg: 0x498, bit 0 - 9 */
+	u32 ssc_spread;		/* 0.005  */
+	u32 ssc_freq;		/* unknown */
+	u32 pll_ie_trim;	/* 4, reg: 0x0400 */
+	u32 pll_ip_trim;	/* 4, reg: 0x0404 */
+	u32 pll_iptat_trim;	/* reg: 0x0410 */
+	u32 pll_cpcset_cur;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	u32 pll_cpmset_cur;	/* 1, reg: 0x04f0, bit 3 - 5 */
+
+	u32 pll_icpmset;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	u32 pll_icpcset;	/* 4, reg: 0x04fc, bit 0 - 2 */
+
+	u32 pll_icpmset_p;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	u32 pll_icpmset_m;	/* 0, reg: 0x04f4, bit 3 - 5 */
+
+	u32 pll_icpcset_p;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	u32 pll_icpcset_m;	/* 0, reg: 0x04f8, bit 3 - 5 */
+
+	u32 pll_lpf_res1;	/* 3, reg: 0x0504, bit 0 - 3 */
+	u32 pll_lpf_cap1;	/* 11, reg: 0x0500, bit 0 - 3 */
+	u32 pll_lpf_cap2;	/* 1, reg: 0x0500, bit 4 - 7 */
+	u32 pll_c3ctrl;		/* 2, reg: 0x04c4 */
+	u32 pll_r3ctrl;		/* 1, reg: 0x04c4 */
+};
+
+struct dsi_pll_output {
+	u32 pll_txclk_en;	/* reg: 0x04c0 */
+	u32 dec_start;		/* reg: 0x0490 */
+	u32 div_frac_start;	/* reg: 0x04b4, 0x4b8, 0x04bc */
+	u32 ssc_period;		/* reg: 0x04a0, 0x04a4 */
+	u32 ssc_step_size;	/* reg: 0x04a8, 0x04ac */
+	u32 plllock_cmp;	/* reg: 0x047c, 0x0480, 0x0484 */
+	u32 pll_vco_div_ref;	/* reg: 0x046c, 0x0470 */
+	u32 pll_vco_count;	/* reg: 0x0474, 0x0478 */
+	u32 pll_kvco_div_ref;	/* reg: 0x0440, 0x0444 */
+	u32 pll_kvco_count;	/* reg: 0x0448, 0x044c */
+	u32 pll_misc1;		/* reg: 0x04e8 */
+	u32 pll_lpf2_postdiv;	/* reg: 0x0504 */
+	u32 pll_resetsm_cntrl;	/* reg: 0x042c */
+	u32 pll_resetsm_cntrl2;	/* reg: 0x0430 */
+	u32 pll_resetsm_cntrl5;	/* reg: 0x043c */
+	u32 pll_kvco_code;		/* reg: 0x0458 */
+
+	u32 cmn_clk_cfg0;	/* reg: 0x0010 */
+	u32 cmn_clk_cfg1;	/* reg: 0x0014 */
+	u32 cmn_ldo_cntrl;	/* reg: 0x004c */
+
+	u32 pll_postdiv;	/* vco */
+	u32 pll_n1div;		/* vco */
+	u32 pll_n2div;		/* hr_oclk3, pixel */
+	u32 fcvo;
+};
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_NUM
+};
+
+struct dsi_pll_db {
+	struct dsi_pll_db *next;
+	struct mdss_pll_resources *pll;
+	struct dsi_pll_input in;
+	struct dsi_pll_output out;
+	int source_setup_done;
+};
+
+enum {
+	PLL_OUTPUT_NONE,
+	PLL_OUTPUT_RIGHT,
+	PLL_OUTPUT_LEFT,
+	PLL_OUTPUT_BOTH
+};
+
+enum {
+	PLL_SOURCE_FROM_LEFT,
+	PLL_SOURCE_FROM_RIGHT
+};
+
+enum {
+	PLL_UNKNOWN,
+	PLL_STANDALONE,
+	PLL_SLAVE,
+	PLL_MASTER
+};
+
+int pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate);
+int shadow_pll_vco_set_rate_14nm(struct clk_hw *hw, unsigned long rate,
+		unsigned long parent_rate);
+long pll_vco_round_rate_14nm(struct clk_hw *hw, unsigned long rate,
+		unsigned long *parent_rate);
+unsigned long pll_vco_recalc_rate_14nm(struct clk_hw *hw,
+		unsigned long parent_rate);
+
+int pll_vco_prepare_14nm(struct clk_hw *hw);
+void pll_vco_unprepare_14nm(struct clk_hw *hw);
+
+int shadow_post_n1_div_set_div(void *context,
+			unsigned int reg, unsigned int div);
+int shadow_post_n1_div_get_div(void *context,
+			unsigned int reg, unsigned int *div);
+int shadow_n2_div_set_div(void *context, unsigned int reg, unsigned int div);
+int shadow_n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
+
+int post_n1_div_set_div(void *context, unsigned int reg, unsigned int div);
+int post_n1_div_get_div(void *context, unsigned int reg, unsigned int *div);
+int n2_div_set_div(void *context, unsigned int reg, unsigned int div);
+int n2_div_get_div(void *context, unsigned int reg, unsigned int *div);
+int dsi_pll_enable_seq_14nm(struct mdss_pll_resources *pll);
+int dsi_mux_set_parent_14nm(void *context, unsigned int reg, unsigned int val);
+int dsi_mux_get_parent_14nm(void *context, unsigned int reg, unsigned int *val);
+
+#endif  /* MDSS_DSI_PLL_14NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-20nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-20nm.c
new file mode 100644
index 0000000..badc9df
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-20nm.c
@@ -0,0 +1,590 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/workqueue.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8994.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+
+#define VCO_DELAY_USEC		1
+
+static const struct clk_ops bypass_lp_div_mux_clk_ops;
+static const struct clk_ops pixel_clk_src_ops;
+static const struct clk_ops byte_clk_src_ops;
+static const struct clk_ops ndiv_clk_ops;
+
+static const struct clk_ops shadow_pixel_clk_src_ops;
+static const struct clk_ops shadow_byte_clk_src_ops;
+static const struct clk_ops clk_ops_gen_mux_dsi;
+
+static int vco_set_rate_20nm(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pr_debug("Cancel pending pll off work\n");
+	cancel_work_sync(&dsi_pll_res->pll_off);
+	rc = pll_20nm_vco_set_rate(vco, rate);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+static int pll1_vco_set_rate_20nm(struct clk *c, unsigned long rate)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll_res = vco->priv;
+
+	mdss_pll_resource_enable(pll_res, true);
+	__dsi_pll_disable(pll_res->pll_base);
+	mdss_pll_resource_enable(pll_res, false);
+
+	pr_debug("Configuring PLL1 registers.\n");
+
+	return 0;
+}
+
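+/*
+ * Shadow (dynamic refresh) rate setting: the PLL resources must already
+ * be on, since this path reprograms the rate for a seamless fps switch
+ * without toggling the PLL.
+ */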
+static int shadow_vco_set_rate_20nm(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res->resource_enable) {
+		pr_err("PLL resources disabled. Dynamic fps invalid\n");
+		return -EINVAL;
+	}
+
+	rc = shadow_pll_20nm_vco_set_rate(vco, rate);
+
+	return rc;
+}
+
+/* Op structures */
+
+static const struct clk_ops pll1_clk_ops_dsi_vco = {
+	.set_rate = pll1_vco_set_rate_20nm,
+};
+
+static const struct clk_ops clk_ops_dsi_vco = {
+	.set_rate = vco_set_rate_20nm,
+	.round_rate = pll_20nm_vco_round_rate,
+	.handoff = pll_20nm_vco_handoff,
+	.prepare = pll_20nm_vco_prepare,
+	.unprepare = pll_20nm_vco_unprepare,
+};
+
+static struct clk_div_ops fixed_hr_oclk2_div_ops = {
+	.set_div = fixed_hr_oclk2_set_div,
+	.get_div = fixed_hr_oclk2_get_div,
+};
+
+static struct clk_div_ops ndiv_ops = {
+	.set_div = ndiv_set_div,
+	.get_div = ndiv_get_div,
+};
+
+static struct clk_div_ops hr_oclk3_div_ops = {
+	.set_div = hr_oclk3_set_div,
+	.get_div = hr_oclk3_get_div,
+};
+
+static struct clk_mux_ops bypass_lp_div_mux_ops = {
+	.set_mux_sel = set_bypass_lp_div_mux_sel,
+	.get_mux_sel = get_bypass_lp_div_mux_sel,
+};
+
+static const struct clk_ops shadow_clk_ops_dsi_vco = {
+	.set_rate = shadow_vco_set_rate_20nm,
+	.round_rate = pll_20nm_vco_round_rate,
+	.handoff = pll_20nm_vco_handoff,
+};
+
+static struct clk_div_ops shadow_fixed_hr_oclk2_div_ops = {
+	.set_div = shadow_fixed_hr_oclk2_set_div,
+	.get_div = fixed_hr_oclk2_get_div,
+};
+
+static struct clk_div_ops shadow_ndiv_ops = {
+	.set_div = shadow_ndiv_set_div,
+	.get_div = ndiv_get_div,
+};
+
+static struct clk_div_ops shadow_hr_oclk3_div_ops = {
+	.set_div = shadow_hr_oclk3_set_div,
+	.get_div = hr_oclk3_get_div,
+};
+
+static struct clk_mux_ops shadow_bypass_lp_div_mux_ops = {
+	.set_mux_sel = set_shadow_bypass_lp_div_mux_sel,
+	.get_mux_sel = get_bypass_lp_div_mux_sel,
+};
+
+static struct clk_mux_ops mdss_byte_mux_ops = {
+	.set_mux_sel = set_mdss_byte_mux_sel,
+	.get_mux_sel = get_mdss_byte_mux_sel,
+};
+
+static struct clk_mux_ops mdss_pixel_mux_ops = {
+	.set_mux_sel = set_mdss_pixel_mux_sel,
+	.get_mux_sel = get_mdss_pixel_mux_sel,
+};
+
+static struct dsi_pll_vco_clk mdss_dsi1_vco_clk_src = {
+	.c = {
+		.dbg_name = "mdss_dsi1_vco_clk_src",
+		.ops = &pll1_clk_ops_dsi_vco,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(mdss_dsi1_vco_clk_src.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi_vco_clk_8994 = {
+	.ref_clk_rate = 19200000,
+	.min_rate = 300000000,
+	.max_rate = 1500000000,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = pll_20nm_vco_enable_seq,
+	.c = {
+		.dbg_name = "dsi_vco_clk_8994",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi_vco_clk_8994.c),
+	},
+};
+
+static struct dsi_pll_vco_clk shadow_dsi_vco_clk_8994 = {
+	.ref_clk_rate = 19200000,
+	.min_rate = 300000000,
+	.max_rate = 1500000000,
+	.c = {
+		.dbg_name = "shadow_dsi_vco_clk_8994",
+		.ops = &shadow_clk_ops_dsi_vco,
+		CLK_INIT(shadow_dsi_vco_clk_8994.c),
+	},
+};
+
+static struct div_clk ndiv_clk_8994 = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &ndiv_ops,
+	.c = {
+		.parent = &dsi_vco_clk_8994.c,
+		.dbg_name = "ndiv_clk_8994",
+		.ops = &ndiv_clk_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(ndiv_clk_8994.c),
+	},
+};
+
+static struct div_clk shadow_ndiv_clk_8994 = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_ndiv_ops,
+	.c = {
+		.parent = &shadow_dsi_vco_clk_8994.c,
+		.dbg_name = "shadow_ndiv_clk_8994",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(shadow_ndiv_clk_8994.c),
+	},
+};
+
+static struct div_clk indirect_path_div2_clk_8994 = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &ndiv_clk_8994.c,
+		.dbg_name = "indirect_path_div2_clk_8994",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(indirect_path_div2_clk_8994.c),
+	},
+};
+
+static struct div_clk shadow_indirect_path_div2_clk_8994 = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &shadow_ndiv_clk_8994.c,
+		.dbg_name = "shadow_indirect_path_div2_clk_8994",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(shadow_indirect_path_div2_clk_8994.c),
+	},
+};
+
+static struct div_clk hr_oclk3_div_clk_8994 = {
+	.data = {
+		.max_div = 255,
+		.min_div = 1,
+	},
+	.ops = &hr_oclk3_div_ops,
+	.c = {
+		.parent = &dsi_vco_clk_8994.c,
+		.dbg_name = "hr_oclk3_div_clk_8994",
+		.ops = &pixel_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(hr_oclk3_div_clk_8994.c),
+	},
+};
+
+static struct div_clk shadow_hr_oclk3_div_clk_8994 = {
+	.data = {
+		.max_div = 255,
+		.min_div = 1,
+	},
+	.ops = &shadow_hr_oclk3_div_ops,
+	.c = {
+		.parent = &shadow_dsi_vco_clk_8994.c,
+		.dbg_name = "shadow_hr_oclk3_div_clk_8994",
+		.ops = &shadow_pixel_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(shadow_hr_oclk3_div_clk_8994.c),
+	},
+};
+
+static struct div_clk pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &hr_oclk3_div_clk_8994.c,
+		.dbg_name = "pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(pixel_clk_src.c),
+	},
+};
+
+static struct div_clk shadow_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &shadow_hr_oclk3_div_clk_8994.c,
+		.dbg_name = "shadow_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(shadow_pixel_clk_src.c),
+	},
+};
+
+static struct mux_clk bypass_lp_div_mux_8994 = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]){
+		{&dsi_vco_clk_8994.c, 0},
+		{&indirect_path_div2_clk_8994.c, 1},
+	},
+	.ops = &bypass_lp_div_mux_ops,
+	.c = {
+		.parent = &dsi_vco_clk_8994.c,
+		.dbg_name = "bypass_lp_div_mux_8994",
+		.ops = &bypass_lp_div_mux_clk_ops,
+		CLK_INIT(bypass_lp_div_mux_8994.c),
+	},
+};
+
+static struct mux_clk shadow_bypass_lp_div_mux_8994 = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]){
+		{&shadow_dsi_vco_clk_8994.c, 0},
+		{&shadow_indirect_path_div2_clk_8994.c, 1},
+	},
+	.ops = &shadow_bypass_lp_div_mux_ops,
+	.c = {
+		.parent = &shadow_dsi_vco_clk_8994.c,
+		.dbg_name = "shadow_bypass_lp_div_mux_8994",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(shadow_bypass_lp_div_mux_8994.c),
+	},
+};
+
+static struct div_clk fixed_hr_oclk2_div_clk_8994 = {
+	.ops = &fixed_hr_oclk2_div_ops,
+	.data = {
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.c = {
+		.parent = &bypass_lp_div_mux_8994.c,
+		.dbg_name = "fixed_hr_oclk2_div_clk_8994",
+		.ops = &byte_clk_src_ops,
+		CLK_INIT(fixed_hr_oclk2_div_clk_8994.c),
+	},
+};
+
+static struct div_clk shadow_fixed_hr_oclk2_div_clk_8994 = {
+	.ops = &shadow_fixed_hr_oclk2_div_ops,
+	.data = {
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.c = {
+		.parent = &shadow_bypass_lp_div_mux_8994.c,
+		.dbg_name = "shadow_fixed_hr_oclk2_div_clk_8994",
+		.ops = &shadow_byte_clk_src_ops,
+		CLK_INIT(shadow_fixed_hr_oclk2_div_clk_8994.c),
+	},
+};
+
+static struct div_clk byte_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &fixed_hr_oclk2_div_clk_8994.c,
+		.dbg_name = "byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(byte_clk_src.c),
+	},
+};
+
+static struct div_clk shadow_byte_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &shadow_fixed_hr_oclk2_div_clk_8994.c,
+		.dbg_name = "shadow_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(shadow_byte_clk_src.c),
+	},
+};
+
+static struct mux_clk mdss_pixel_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&pixel_clk_src.c, 0},
+		{&shadow_pixel_clk_src.c, 1},
+	},
+	.ops = &mdss_pixel_mux_ops,
+	.c = {
+		.parent = &pixel_clk_src.c,
+		.dbg_name = "mdss_pixel_clk_mux",
+		.ops = &clk_ops_gen_mux,
+		CLK_INIT(mdss_pixel_clk_mux.c),
+	}
+};
+
+static struct mux_clk mdss_byte_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&byte_clk_src.c, 0},
+		{&shadow_byte_clk_src.c, 1},
+	},
+	.ops = &mdss_byte_mux_ops,
+	.c = {
+		.parent = &byte_clk_src.c,
+		.dbg_name = "mdss_byte_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		CLK_INIT(mdss_byte_clk_mux.c),
+	}
+};
+
+static struct clk_lookup mdss_dsi_pll_1_cc_8994[] = {
+	CLK_LIST(mdss_dsi1_vco_clk_src),
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8994[] = {
+	CLK_LIST(mdss_pixel_clk_mux),
+	CLK_LIST(mdss_byte_clk_mux),
+	CLK_LIST(pixel_clk_src),
+	CLK_LIST(byte_clk_src),
+	CLK_LIST(fixed_hr_oclk2_div_clk_8994),
+	CLK_LIST(bypass_lp_div_mux_8994),
+	CLK_LIST(hr_oclk3_div_clk_8994),
+	CLK_LIST(indirect_path_div2_clk_8994),
+	CLK_LIST(ndiv_clk_8994),
+	CLK_LIST(dsi_vco_clk_8994),
+	CLK_LIST(shadow_pixel_clk_src),
+	CLK_LIST(shadow_byte_clk_src),
+	CLK_LIST(shadow_fixed_hr_oclk2_div_clk_8994),
+	CLK_LIST(shadow_bypass_lp_div_mux_8994),
+	CLK_LIST(shadow_hr_oclk3_div_clk_8994),
+	CLK_LIST(shadow_indirect_path_div2_clk_8994),
+	CLK_LIST(shadow_ndiv_clk_8994),
+	CLK_LIST(shadow_dsi_vco_clk_8994),
+};
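+
+/*
+ * The shadow_* clocks above duplicate the primary clock tree, and
+ * mdss_pixel_clk_mux/mdss_byte_clk_mux switch between the two paths.
+ * Presumably this lets a new rate be programmed on the shadow path
+ * (the dynamic fps case handled by the shadow_* set_rate ops) and then
+ * muxed in without disturbing the live byte and pixel clocks.
+ */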
+
+static void dsi_pll_off_work(struct work_struct *work)
+{
+	struct mdss_pll_resources *pll_res;
+
+	if (!work) {
+		pr_err("work structure is invalid\n");
+		return;
+	}
+
+	pr_debug("Starting PLL off worker\n");
+
+	pll_res = container_of(work, struct mdss_pll_resources, pll_off);
+
+	mdss_pll_resource_enable(pll_res, true);
+	__dsi_pll_disable(pll_res->pll_base);
+	if (pll_res->pll_1_base)
+		__dsi_pll_disable(pll_res->pll_1_base);
+	mdss_pll_resource_enable(pll_res, false);
+}
+
+static int dsi_pll_regulator_notifier_call(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct mdss_pll_resources *pll_res;
+
+	if (!self) {
+		pr_err("notifier block is invalid\n");
+		goto error;
+	}
+
+	pll_res = container_of(self, struct mdss_pll_resources, gdsc_cb);
+
+	if (event & REGULATOR_EVENT_ENABLE) {
+		pr_debug("Regulator ON event. Scheduling pll off worker\n");
+		schedule_work(&pll_res->pll_off);
+	}
+
+	if (event & REGULATOR_EVENT_DISABLE)
+		pr_debug("Regulator OFF event.\n");
+
+error:
+	return NOTIFY_OK;
+}
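+
+/*
+ * The notifier above schedules the pll_off worker on every GDSC enable
+ * event, so both PLL instances are driven to a known disabled state
+ * before the display driver reprograms them; presumably this guards
+ * against the PLLs coming back up in an undefined state after a GDSC
+ * power cycle.
+ */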
+
+int dsi_pll_clock_register_20nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc;
+	struct dss_vreg *pll_reg;
+
+	/*
+	 * Set client data on the mux, div and vco clocks. This is needed
+	 * only for PLL0, since that is the instance in active use.
+	 */
+	if (!pll_res->index) {
+		byte_clk_src.priv = pll_res;
+		pixel_clk_src.priv = pll_res;
+		bypass_lp_div_mux_8994.priv = pll_res;
+		indirect_path_div2_clk_8994.priv = pll_res;
+		ndiv_clk_8994.priv = pll_res;
+		fixed_hr_oclk2_div_clk_8994.priv = pll_res;
+		hr_oclk3_div_clk_8994.priv = pll_res;
+		dsi_vco_clk_8994.priv = pll_res;
+
+		shadow_byte_clk_src.priv = pll_res;
+		shadow_pixel_clk_src.priv = pll_res;
+		shadow_bypass_lp_div_mux_8994.priv = pll_res;
+		shadow_indirect_path_div2_clk_8994.priv = pll_res;
+		shadow_ndiv_clk_8994.priv = pll_res;
+		shadow_fixed_hr_oclk2_div_clk_8994.priv = pll_res;
+		shadow_hr_oclk3_div_clk_8994.priv = pll_res;
+		shadow_dsi_vco_clk_8994.priv = pll_res;
+
+		pll_res->vco_delay = VCO_DELAY_USEC;
+
+		/* Set clock source operations */
+		pixel_clk_src_ops = clk_ops_slave_div;
+		pixel_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+		ndiv_clk_ops = clk_ops_div;
+		ndiv_clk_ops.prepare = dsi_pll_div_prepare;
+
+		byte_clk_src_ops = clk_ops_div;
+		byte_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+		bypass_lp_div_mux_clk_ops = clk_ops_gen_mux;
+		bypass_lp_div_mux_clk_ops.prepare = dsi_pll_mux_prepare;
+
+		clk_ops_gen_mux_dsi = clk_ops_gen_mux;
+		clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
+		clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
+
+		shadow_pixel_clk_src_ops = clk_ops_slave_div;
+		shadow_pixel_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+		shadow_byte_clk_src_ops = clk_ops_div;
+		shadow_byte_clk_src_ops.prepare = dsi_pll_div_prepare;
+	} else {
+		mdss_dsi1_vco_clk_src.priv = pll_res;
+	}
+
+	if ((pll_res->target_id == MDSS_PLL_TARGET_8994) ||
+			(pll_res->target_id == MDSS_PLL_TARGET_8992)) {
+		if (pll_res->index) {
+			rc = of_msm_clock_register(pdev->dev.of_node,
+					mdss_dsi_pll_1_cc_8994,
+					ARRAY_SIZE(mdss_dsi_pll_1_cc_8994));
+			if (rc) {
+				pr_err("Clock register failed\n");
+				rc = -EPROBE_DEFER;
+			}
+		} else {
+			rc = of_msm_clock_register(pdev->dev.of_node,
+				mdss_dsi_pllcc_8994,
+				ARRAY_SIZE(mdss_dsi_pllcc_8994));
+			if (rc) {
+				pr_err("Clock register failed\n");
+				rc = -EPROBE_DEFER;
+			}
+			pll_res->gdsc_cb.notifier_call =
+				dsi_pll_regulator_notifier_call;
+			INIT_WORK(&pll_res->pll_off, dsi_pll_off_work);
+
+			pll_reg = mdss_pll_get_mp_by_reg_name(pll_res, "gdsc");
+			if (pll_reg) {
+				pr_debug("Registering for gdsc regulator events\n");
+				if (regulator_register_notifier(pll_reg->vreg,
+							&(pll_res->gdsc_cb)))
+					pr_err("Regulator notification registration failed!\n");
+			}
+		}
+	} else {
+		pr_err("Invalid target ID\n");
+		rc = -EINVAL;
+	}
+
+	if (!rc)
+		pr_info("Registered DSI PLL clocks successfully\n");
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28hpm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28hpm.c
new file mode 100644
index 0000000..96166d3
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28hpm.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8974.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+
+#define VCO_DELAY_USEC		1
+
+static struct clk_div_ops fixed_2div_ops;
+static struct clk_ops byte_mux_clk_ops;
+static struct clk_ops pixel_clk_src_ops;
+static struct clk_ops byte_clk_src_ops;
+static struct clk_ops analog_postdiv_clk_ops;
+static struct lpfr_cfg lpfr_lut_struct[] = {
+	{479500000, 8},
+	{480000000, 11},
+	{575500000, 8},
+	{576000000, 12},
+	{610500000, 8},
+	{659500000, 9},
+	{671500000, 10},
+	{672000000, 14},
+	{708500000, 10},
+	{750000000, 11},
+};
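+
+/*
+ * Each lpfr_lut entry maps an upper VCO rate bound (Hz) to a loop
+ * filter resistance code. The table is consumed via vco_set_rate()
+ * below, which presumably lives in the shared PLL utility code and
+ * picks the first entry whose bound is at or above the requested rate.
+ */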
+
+static void dsi_pll_software_reset(struct mdss_pll_resources *dsi_pll_res)
+{
+	/*
+	 * Add HW recommended delays after toggling the software
+	 * reset bit off and back on.
+	 */
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
+	udelay(1);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
+	udelay(1);
+}
+
+static int vco_set_rate_hpm(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	rc = vco_set_rate(vco, rate);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+static int dsi_pll_enable_seq_8974(struct mdss_pll_resources *dsi_pll_res)
+{
+	int i, rc = 0;
+	int pll_locked;
+
+	dsi_pll_software_reset(dsi_pll_res);
+
+	/*
+	 * PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+	udelay(1);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+	udelay(200);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x07);
+	udelay(500);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+	udelay(500);
+
+	for (i = 0; i < 2; i++) {
+		udelay(100);
+		/* DSI Uniphy lock detect setting */
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0c);
+		udelay(100);
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
+
+		pll_locked = dsi_pll_lock_status(dsi_pll_res);
+		if (pll_locked)
+			break;
+
+		dsi_pll_software_reset(dsi_pll_res);
+		/*
+		 * PLL power up sequence.
+		 * Add necessary delays recommended by hardware.
+		 */
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x1);
+		udelay(1);
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x5);
+		udelay(200);
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x7);
+		udelay(250);
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x5);
+		udelay(200);
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x7);
+		udelay(500);
+		MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0xf);
+		udelay(500);
+	}
+
+	if (!pll_locked) {
+		pr_err("DSI PLL lock failed\n");
+		rc = -EINVAL;
+	} else {
+		pr_debug("DSI PLL Lock success\n");
+	}
+
+	return rc;
+}
+
+/* Op structures */
+
+static const struct clk_ops clk_ops_dsi_vco = {
+	.set_rate = vco_set_rate_hpm,
+	.round_rate = vco_round_rate,
+	.handoff = vco_handoff,
+	.prepare = vco_prepare,
+	.unprepare = vco_unprepare,
+};
+
+static struct clk_div_ops fixed_4div_ops = {
+	.set_div = fixed_4div_set_div,
+	.get_div = fixed_4div_get_div,
+};
+
+static struct clk_div_ops analog_postdiv_ops = {
+	.set_div = analog_set_div,
+	.get_div = analog_get_div,
+};
+
+static struct clk_div_ops digital_postdiv_ops = {
+	.set_div = digital_set_div,
+	.get_div = digital_get_div,
+};
+
+static struct clk_mux_ops byte_mux_ops = {
+	.set_mux_sel = set_byte_mux_sel,
+	.get_mux_sel = get_byte_mux_sel,
+};
+
+static struct dsi_pll_vco_clk dsi_vco_clk_8974 = {
+	.ref_clk_rate = 19200000,
+	.min_rate = 350000000,
+	.max_rate = 750000000,
+	.pll_en_seq_cnt = 3,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8974,
+	.pll_enable_seqs[1] = dsi_pll_enable_seq_8974,
+	.pll_enable_seqs[2] = dsi_pll_enable_seq_8974,
+	.lpfr_lut_size = 10,
+	.lpfr_lut = lpfr_lut_struct,
+	.c = {
+		.dbg_name = "dsi_vco_clk_8974",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi_vco_clk_8974.c),
+	},
+};
+
+static struct div_clk analog_postdiv_clk_8974 = {
+	.data = {
+		.max_div = 255,
+		.min_div = 1,
+	},
+	.ops = &analog_postdiv_ops,
+	.c = {
+		.parent = &dsi_vco_clk_8974.c,
+		.dbg_name = "analog_postdiv_clk",
+		.ops = &analog_postdiv_clk_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(analog_postdiv_clk_8974.c),
+	},
+};
+
+static struct div_clk indirect_path_div2_clk_8974 = {
+	.ops = &fixed_2div_ops,
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &analog_postdiv_clk_8974.c,
+		.dbg_name = "indirect_path_div2_clk",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(indirect_path_div2_clk_8974.c),
+	},
+};
+
+static struct div_clk pixel_clk_src_8974 = {
+	.data = {
+		.max_div = 255,
+		.min_div = 1,
+	},
+	.ops = &digital_postdiv_ops,
+	.c = {
+		.parent = &dsi_vco_clk_8974.c,
+		.dbg_name = "pixel_clk_src_8974",
+		.ops = &pixel_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(pixel_clk_src_8974.c),
+	},
+};
+
+static struct mux_clk byte_mux_8974 = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]){
+		{&dsi_vco_clk_8974.c, 0},
+		{&indirect_path_div2_clk_8974.c, 1},
+	},
+	.ops = &byte_mux_ops,
+	.c = {
+		.parent = &dsi_vco_clk_8974.c,
+		.dbg_name = "byte_mux_8974",
+		.ops = &byte_mux_clk_ops,
+		CLK_INIT(byte_mux_8974.c),
+	},
+};
+
+static struct div_clk byte_clk_src_8974 = {
+	.ops = &fixed_4div_ops,
+	.data = {
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.c = {
+		.parent = &byte_mux_8974.c,
+		.dbg_name = "byte_clk_src_8974",
+		.ops = &byte_clk_src_ops,
+		CLK_INIT(byte_clk_src_8974.c),
+	},
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8974[] = {
+	CLK_LOOKUP_OF("pixel_src", pixel_clk_src_8974,
+						"fd8c0000.qcom,mmsscc-mdss"),
+	CLK_LOOKUP_OF("byte_src", byte_clk_src_8974,
+						"fd8c0000.qcom,mmsscc-mdss"),
+};
+
+int dsi_pll_clock_register_hpm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc;
+
+	/* Set client data to mux, div and vco clocks */
+	byte_clk_src_8974.priv = pll_res;
+	pixel_clk_src_8974.priv = pll_res;
+	byte_mux_8974.priv = pll_res;
+	indirect_path_div2_clk_8974.priv = pll_res;
+	analog_postdiv_clk_8974.priv = pll_res;
+	dsi_vco_clk_8974.priv = pll_res;
+	pll_res->vco_delay = VCO_DELAY_USEC;
+
+	/* Set clock source operations */
+	pixel_clk_src_ops = clk_ops_slave_div;
+	pixel_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+	analog_postdiv_clk_ops = clk_ops_div;
+	analog_postdiv_clk_ops.prepare = dsi_pll_div_prepare;
+
+	byte_clk_src_ops = clk_ops_div;
+	byte_clk_src_ops.prepare = dsi_pll_div_prepare;
+
+	byte_mux_clk_ops = clk_ops_gen_mux;
+	byte_mux_clk_ops.prepare = dsi_pll_mux_prepare;
+
+	if (pll_res->target_id == MDSS_PLL_TARGET_8974) {
+		rc = of_msm_clock_register(pdev->dev.of_node,
+			mdss_dsi_pllcc_8974, ARRAY_SIZE(mdss_dsi_pllcc_8974));
+		if (rc) {
+			pr_err("Clock register failed\n");
+			rc = -EPROBE_DEFER;
+		}
+	} else {
+		pr_err("Invalid target ID\n");
+		rc = -EINVAL;
+	}
+
+	if (!rc)
+		pr_info("Registered DSI PLL clocks successfully\n");
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c
new file mode 100644
index 0000000..aed2766
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28lpm.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <dt-bindings/clock/mdss-28nm-pll-clk.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-28nm.h"
+
+#define VCO_DELAY_USEC			1000
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_MAX
+};
+
+static struct lpfr_cfg lpfr_lut_struct[] = {
+	{479500000, 8},
+	{480000000, 11},
+	{575500000, 8},
+	{576000000, 12},
+	{610500000, 8},
+	{659500000, 9},
+	{671500000, 10},
+	{672000000, 14},
+	{708500000, 10},
+	{750000000, 11},
+};
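+
+/*
+ * Each lpfr_lut entry maps an upper VCO rate bound (Hz) to a loop
+ * filter resistance code. pll_28nm_vco_rate_calc() in
+ * mdss-dsi-pll-28nm-util.c walks the table and picks the first entry
+ * whose bound is at or above the requested rate; e.g. a 500 MHz
+ * request selects the {575500000, 8} entry.
+ */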
+
+static void dsi_pll_sw_reset(struct mdss_pll_resources *rsc)
+{
+	/*
+	 * DSI PLL software reset. Add HW recommended delays after toggling
+	 * the software reset bit off and back on.
+	 */
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
+	ndelay(500);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
+}
+
+static void dsi_pll_toggle_lock_detect(
+				struct mdss_pll_resources *rsc)
+{
+	/* DSI PLL toggle lock detect setting */
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x04);
+	ndelay(500);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x05);
+	udelay(512);
+}
+
+static int dsi_pll_check_lock_status(
+				struct mdss_pll_resources *rsc)
+{
+	int rc = 0;
+
+	rc = dsi_pll_lock_status(rsc);
+	if (rc)
+		pr_debug("PLL Locked\n");
+	else
+		pr_err("PLL failed to lock\n");
+
+	return rc;
+}
+
+static int dsi_pll_enable_seq_gf2(struct mdss_pll_resources *rsc)
+{
+	int pll_locked = 0;
+
+	dsi_pll_sw_reset(rsc);
+
+	/*
+	 * GF PART 2 PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x04);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+	udelay(3);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+	udelay(500);
+
+	dsi_pll_toggle_lock_detect(rsc);
+
+	pll_locked = dsi_pll_check_lock_status(rsc);
+	return pll_locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_enable_seq_gf1(struct mdss_pll_resources *rsc)
+{
+	int pll_locked = 0;
+
+	dsi_pll_sw_reset(rsc);
+	/*
+	 * GF PART 1 PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x14);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+	udelay(3);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+	udelay(500);
+
+	dsi_pll_toggle_lock_detect(rsc);
+
+	pll_locked = dsi_pll_check_lock_status(rsc);
+	return pll_locked ? 0 : -EINVAL;
+}
+
+static int dsi_pll_enable_seq_tsmc(struct mdss_pll_resources *rsc)
+{
+	int pll_locked = 0;
+
+	dsi_pll_sw_reset(rsc);
+	/*
+	 * TSMC PLL power up sequence.
+	 * Add necessary delays recommended by hardware.
+	 */
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1, 0x34);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+	udelay(500);
+
+	dsi_pll_toggle_lock_detect(rsc);
+
+	pll_locked = dsi_pll_check_lock_status(rsc);
+	return pll_locked ? 0 : -EINVAL;
+}
+
+static struct regmap_config dsi_pll_28lpm_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0xF4,
+};
+
+static struct regmap_bus analog_postdiv_regmap_bus = {
+	.reg_write = analog_postdiv_reg_write,
+	.reg_read = analog_postdiv_reg_read,
+};
+
+static struct regmap_bus byteclk_src_mux_regmap_bus = {
+	.reg_write = byteclk_mux_write_sel,
+	.reg_read = byteclk_mux_read_sel,
+};
+
+static struct regmap_bus pclk_src_regmap_bus = {
+	.reg_write = pixel_clk_set_div,
+	.reg_read = pixel_clk_get_div,
+};
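+
+/*
+ * These regmap buses do not front a plain memory-mapped region; each
+ * reg_read/reg_write callback (see mdss-dsi-pll-28nm-util.c) brackets
+ * the raw register access with mdss_pll_resource_enable()/disable(),
+ * which lets the generic clk_regmap_div and clk_regmap_mux ops below
+ * touch PLL registers safely even while the PLL power domain may be
+ * gated.
+ */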
+
+static const struct clk_ops clk_ops_vco_28lpm = {
+	.recalc_rate = vco_28nm_recalc_rate,
+	.set_rate = vco_28nm_set_rate,
+	.round_rate = vco_28nm_round_rate,
+	.prepare = vco_28nm_prepare,
+	.unprepare = vco_28nm_unprepare,
+};
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 350000000UL,
+	.max_rate = 750000000UL,
+	.pll_en_seq_cnt = 9,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc,
+	.pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc,
+	.pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc,
+	.pll_enable_seqs[3] = dsi_pll_enable_seq_gf1,
+	.pll_enable_seqs[4] = dsi_pll_enable_seq_gf1,
+	.pll_enable_seqs[5] = dsi_pll_enable_seq_gf1,
+	.pll_enable_seqs[6] = dsi_pll_enable_seq_gf2,
+	.pll_enable_seqs[7] = dsi_pll_enable_seq_gf2,
+	.pll_enable_seqs[8] = dsi_pll_enable_seq_gf2,
+	.lpfr_lut_size = 10,
+	.lpfr_lut = lpfr_lut_struct,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_vco_clk",
+			.parent_names = (const char *[]){"cxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_28lpm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 350000000UL,
+	.max_rate = 750000000UL,
+	.pll_en_seq_cnt = 9,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_tsmc,
+	.pll_enable_seqs[1] = dsi_pll_enable_seq_tsmc,
+	.pll_enable_seqs[2] = dsi_pll_enable_seq_tsmc,
+	.pll_enable_seqs[3] = dsi_pll_enable_seq_gf1,
+	.pll_enable_seqs[4] = dsi_pll_enable_seq_gf1,
+	.pll_enable_seqs[5] = dsi_pll_enable_seq_gf1,
+	.pll_enable_seqs[6] = dsi_pll_enable_seq_gf2,
+	.pll_enable_seqs[7] = dsi_pll_enable_seq_gf2,
+	.pll_enable_seqs[8] = dsi_pll_enable_seq_gf2,
+	.lpfr_lut_size = 10,
+	.lpfr_lut = lpfr_lut_struct,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_vco_clk",
+			.parent_names = (const char *[]){"cxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_28lpm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
+static struct clk_regmap_div dsi0pll_analog_postdiv = {
+	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_analog_postdiv",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_analog_postdiv = {
+	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_analog_postdiv",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_indirect_path_src = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_indirect_path_src",
+		.parent_names = (const char *[]){"dsi0pll_analog_postdiv"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_indirect_path_src = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_indirect_path_src",
+		.parent_names = (const char *[]){"dsi1pll_analog_postdiv"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_byteclk_src_mux = {
+	.reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
+	.shift = 1,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_byteclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi0pll_vco_clk",
+				"dsi0pll_indirect_path_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_byteclk_src_mux = {
+	.reg = DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
+	.shift = 1,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_byteclk_src_mux",
+			.parent_names = (const char *[]){
+				"dsi1pll_vco_clk",
+				"dsi1pll_indirect_path_src"},
+			.num_parents = 2,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_byteclk_src = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_byteclk_src",
+		.parent_names = (const char *[]){
+				"dsi0pll_byteclk_src_mux"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_byteclk_src = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_byteclk_src",
+		.parent_names = (const char *[]){
+				"dsi1pll_byteclk_src_mux"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pclk_src = {
+	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
+	.shift = 0,
+	.width = 8,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pclk_src = {
+	.reg = DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
+	.shift = 0,
+	.width = 8,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_hw *mdss_dsi_pllcc_28lpm[] = {
+	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+	[ANALOG_POSTDIV_0_CLK] = &dsi0pll_analog_postdiv.clkr.hw,
+	[INDIRECT_PATH_SRC_0_CLK] = &dsi0pll_indirect_path_src.hw,
+	[BYTECLK_SRC_MUX_0_CLK] = &dsi0pll_byteclk_src_mux.clkr.hw,
+	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
+	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
+	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+	[ANALOG_POSTDIV_1_CLK] = &dsi1pll_analog_postdiv.clkr.hw,
+	[INDIRECT_PATH_SRC_1_CLK] = &dsi1pll_indirect_path_src.hw,
+	[BYTECLK_SRC_MUX_1_CLK] = &dsi1pll_byteclk_src_mux.clkr.hw,
+	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
+	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
+};
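+
+/*
+ * Per-PLL clock tree, top to bottom: the VCO feeds a 4-bit analog
+ * postdiv, whose output is halved by indirect_path_src;
+ * byteclk_src_mux then selects either the VCO directly or that
+ * indirect path, and byteclk_src divides the selection by 4. The
+ * pixel path is a separate 8-bit programmable divider (pclk_src)
+ * straight off the VCO. As an illustration, a 500 MHz VCO in direct
+ * mode yields a 125 MHz byte clock.
+ */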
+
+int dsi_pll_clock_register_28lpm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx, i;
+	struct clk *clk;
+	struct clk_onecell_data *clk_data;
+	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_28lpm);
+	struct regmap *rmap;
+
+	int const ssc_freq_min = 30000; /* min. recommended freq. value */
+	int const ssc_freq_max = 33000; /* max. recommended freq. value */
+	int const ssc_ppm_max = 5000; /* max. recommended ppm */
+
+	ndx = pll_res->index;
+
+	if (ndx >= DSI_PLL_MAX) {
+		pr_err("pll index(%d) NOT supported\n", ndx);
+		return -EINVAL;
+	}
+
+	pll_res->vco_delay = VCO_DELAY_USEC;
+
+	if (pll_res->ssc_en) {
+		if (!pll_res->ssc_freq || (pll_res->ssc_freq < ssc_freq_min) ||
+			(pll_res->ssc_freq > ssc_freq_max)) {
+			pll_res->ssc_freq = ssc_freq_min;
+			pr_debug("SSC frequency out of recommended range. Set to default=%d\n",
+				pll_res->ssc_freq);
+		}
+
+		if (!pll_res->ssc_ppm || (pll_res->ssc_ppm > ssc_ppm_max)) {
+			pll_res->ssc_ppm = ssc_ppm_max;
+			pr_debug("SSC PPM out of recommended range. Set to default=%d\n",
+				pll_res->ssc_ppm);
+		}
+	}
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data),
+					GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+				sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	/* Establish client data */
+	if (ndx == 0) {
+		rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus,
+				pll_res, &dsi_pll_28lpm_config);
+		if (IS_ERR(rmap)) {
+			pr_err("regmap init failed for DSI clock:%d\n",
+					pll_res->index);
+			return -EINVAL;
+		}
+		dsi0pll_byteclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus,
+				pll_res, &dsi_pll_28lpm_config);
+		if (IS_ERR(rmap)) {
+			pr_err("regmap init failed for DSI clock:%d\n",
+					pll_res->index);
+			return -EINVAL;
+		}
+		dsi0pll_analog_postdiv.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_28lpm_config);
+		if (IS_ERR(rmap)) {
+			pr_err("regmap init failed for DSI clock:%d\n",
+					pll_res->index);
+			return -EINVAL;
+		}
+		dsi0pll_pclk_src.clkr.regmap = rmap;
+
+		dsi0pll_vco_clk.priv = pll_res;
+		for (i = VCO_CLK_0; i <= PCLK_SRC_0_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_28lpm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+							pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+
+	} else {
+		rmap = devm_regmap_init(&pdev->dev, &byteclk_src_mux_regmap_bus,
+				pll_res, &dsi_pll_28lpm_config);
+		if (IS_ERR(rmap)) {
+			pr_err("regmap init failed for DSI clock:%d\n",
+					pll_res->index);
+			return -EINVAL;
+		}
+		dsi1pll_byteclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &analog_postdiv_regmap_bus,
+				pll_res, &dsi_pll_28lpm_config);
+		if (IS_ERR(rmap)) {
+			pr_err("regmap init failed for DSI clock:%d\n",
+					pll_res->index);
+			return -EINVAL;
+		}
+		dsi1pll_analog_postdiv.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_28lpm_config);
+		if (IS_ERR(rmap)) {
+			pr_err("regmap init failed for DSI clock:%d\n",
+					pll_res->index);
+			return -EINVAL;
+		}
+		dsi1pll_pclk_src.clkr.regmap = rmap;
+
+		dsi1pll_vco_clk.priv = pll_res;
+		for (i = VCO_CLK_1; i <= PCLK_SRC_1_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_28lpm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+							pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+				ndx);
+		return rc;
+	}
+
+clk_register_fail:
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c
new file mode 100644
index 0000000..1f3fad4
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm-util.c
@@ -0,0 +1,652 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-28nm.h"
+
+#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG	(0x0)
+#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG	(0x0008)
+#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG	(0x000C)
+#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG	(0x0014)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG	(0x0024)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG		(0x002C)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG	(0x0030)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG	(0x0034)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0		(0x0038)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1		(0x003C)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2		(0x0040)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3		(0x0044)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4		(0x0048)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0		(0x004C)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1		(0x0050)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2		(0x0054)
+#define DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3		(0x0058)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0		(0x006C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2		(0x0074)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3		(0x0078)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4		(0x007C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5		(0x0080)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6		(0x0084)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7		(0x0088)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8		(0x008C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9		(0x0090)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10	(0x0094)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11	(0x0098)
+#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG	(0x009C)
+#define DSI_PHY_PLL_UNIPHY_PLL_STATUS		(0x00C0)
+
+#define DSI_PLL_POLL_DELAY_US			50
+#define DSI_PLL_POLL_TIMEOUT_US			500
+
+int analog_postdiv_reg_read(void *context, unsigned int reg,
+		unsigned int *div)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*div = MDSS_PLL_REG_R(rsc->pll_base, reg);
+
+	pr_debug("analog_postdiv div = %d\n", *div);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+int analog_postdiv_reg_write(void *context, unsigned int reg,
+		unsigned int div)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("analog_postdiv div = %d\n", div);
+
+	MDSS_PLL_REG_W(rsc->pll_base, reg, div);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+int byteclk_mux_read_sel(void *context, unsigned int reg,
+		unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = (MDSS_PLL_REG_R(rsc->pll_base, reg) & BIT(1));
+	pr_debug("byteclk mux mode = %s\n", *val ? "indirect" : "direct");
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+int byteclk_mux_write_sel(void *context, unsigned int reg,
+		unsigned int val)
+{
+	int rc = 0;
+	u32 reg_val = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("byteclk mux set to %s mode\n", val ? "indirect" : "direct");
+
+	reg_val = MDSS_PLL_REG_R(rsc->pll_base, reg);
+	reg_val &= ~0x02;
+	reg_val |= val;
+
+	MDSS_PLL_REG_W(rsc->pll_base, reg, reg_val);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+int pixel_clk_get_div(void *context, unsigned int reg,
+		unsigned int *div)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*div = MDSS_PLL_REG_R(rsc->pll_base, reg);
+
+	pr_debug("pclk_src div = %d\n", *div);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+int pixel_clk_set_div(void *context, unsigned int reg,
+		unsigned int div)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	pr_debug("pclk_src div = %d\n", div);
+
+	MDSS_PLL_REG_W(rsc->pll_base, reg, div);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+int dsi_pll_lock_status(struct mdss_pll_resources *rsc)
+{
+	u32 status;
+	int pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((rsc->pll_base +
+			DSI_PHY_PLL_UNIPHY_PLL_STATUS),
+			status,
+			((status & BIT(0)) == 1),
+			DSI_PLL_POLL_DELAY_US,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("DSI PLL status=%x failed to lock\n", status);
+		pll_locked = 0;
+	} else {
+		pll_locked = 1;
+	}
+
+	return pll_locked;
+}
+
+static int pll_28nm_vco_rate_calc(struct dsi_pll_vco_clk *vco,
+		struct mdss_dsi_vco_calc *vco_calc, unsigned long vco_clk_rate)
+{
+	s32 rem;
+	s64 frac_n_mode, ref_doubler_en_b;
+	s64 ref_clk_to_pll, div_fb, frac_n_value;
+	int i;
+
+	/* Configure the Loop filter resistance */
+	for (i = 0; i < vco->lpfr_lut_size; i++)
+		if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate)
+			break;
+	if (i == vco->lpfr_lut_size) {
+		pr_err("unable to get loop filter resistance. vco=%ld\n",
+			vco_clk_rate);
+		return -EINVAL;
+	}
+	vco_calc->lpfr_lut_res = vco->lpfr_lut[i].r;
+
+	div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem);
+	if (rem) {
+		vco_calc->refclk_cfg = 0x1;
+		frac_n_mode = 1;
+		ref_doubler_en_b = 0;
+	} else {
+		vco_calc->refclk_cfg = 0x0;
+		frac_n_mode = 0;
+		ref_doubler_en_b = 1;
+	}
+
+	pr_debug("refclk_cfg = %lld\n", vco_calc->refclk_cfg);
+
+	ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (vco_calc->refclk_cfg))
+			  + (ref_doubler_en_b * vco->ref_clk_rate));
+
+	div_fb = div_s64_rem(vco_clk_rate, ref_clk_to_pll, &rem);
+	frac_n_value = div_s64(((s64)rem * (1 << 16)), ref_clk_to_pll);
+	vco_calc->gen_vco_clk = vco_clk_rate;
+
+	pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll);
+	pr_debug("div_fb = %lld\n", div_fb);
+	pr_debug("frac_n_value = %lld\n", frac_n_value);
+
+	pr_debug("Generated VCO Clock: %lld\n", vco_calc->gen_vco_clk);
+	rem = 0;
+	if (frac_n_mode) {
+		vco_calc->sdm_cfg0 = 0;
+		vco_calc->sdm_cfg1 = (div_fb & 0x3f) - 1;
+		vco_calc->sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem);
+		vco_calc->sdm_cfg2 = rem;
+	} else {
+		vco_calc->sdm_cfg0 = (0x1 << 5);
+		vco_calc->sdm_cfg0 |= (div_fb & 0x3f) - 1;
+		vco_calc->sdm_cfg1 = 0;
+		vco_calc->sdm_cfg2 = 0;
+		vco_calc->sdm_cfg3 = 0;
+	}
+
+	pr_debug("sdm_cfg0=%lld\n", vco_calc->sdm_cfg0);
+	pr_debug("sdm_cfg1=%lld\n", vco_calc->sdm_cfg1);
+	pr_debug("sdm_cfg2=%lld\n", vco_calc->sdm_cfg2);
+	pr_debug("sdm_cfg3=%lld\n", vco_calc->sdm_cfg3);
+
+	vco_calc->cal_cfg11 = div_s64_rem(vco_calc->gen_vco_clk,
+			256 * 1000000, &rem);
+	vco_calc->cal_cfg10 = rem / 1000000;
+	pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n",
+		vco_calc->cal_cfg10, vco_calc->cal_cfg11);
+
+	return 0;
+}
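+
+/*
+ * Worked example for the arithmetic above (illustrative values): with
+ * ref_clk_rate = 19.2 MHz and a requested VCO rate of 500 MHz, the
+ * rate does not divide evenly, so fractional-N mode is used with the
+ * ref doubler on (refclk_cfg = 0x1, ref_clk_to_pll = 38.4 MHz). Then
+ * div_fb = 500000000 / 38400000 = 13 (rem = 800000),
+ * frac_n_value = 800000 * 65536 / 38400000 = 1365, and therefore
+ * sdm_cfg1 = 12, sdm_cfg3 = 1365 / 256 = 5, sdm_cfg2 = 85.
+ */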
+
+static void pll_28nm_ssc_param_calc(struct dsi_pll_vco_clk *vco,
+		struct mdss_dsi_vco_calc *vco_calc)
+{
+	struct mdss_pll_resources *rsc = vco->priv;
+	s64 ppm_freq, incr, spread_freq, div_rf, frac_n_value;
+	s32 rem;
+
+	if (!rsc->ssc_en) {
+		pr_debug("DSI PLL SSC not enabled\n");
+		return;
+	}
+
+	vco_calc->ssc.kdiv = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
+			1000000) - 1;
+	vco_calc->ssc.triang_steps = DIV_ROUND_CLOSEST(vco->ref_clk_rate,
+			rsc->ssc_freq * (vco_calc->ssc.kdiv + 1));
+	ppm_freq = div_s64(vco_calc->gen_vco_clk * rsc->ssc_ppm,
+			1000000);
+	incr = div64_s64(ppm_freq * 65536, vco->ref_clk_rate * 2 *
+			vco_calc->ssc.triang_steps);
+
+	vco_calc->ssc.triang_inc_7_0 = incr & 0xff;
+	vco_calc->ssc.triang_inc_9_8 = (incr >> 8) & 0x3;
+
+	if (!rsc->ssc_center)
+		spread_freq = vco_calc->gen_vco_clk - ppm_freq;
+	else
+		spread_freq = vco_calc->gen_vco_clk - (ppm_freq / 2);
+
+	div_rf = div_s64(spread_freq, 2 * vco->ref_clk_rate);
+	vco_calc->ssc.dc_offset = (div_rf - 1);
+
+	div_s64_rem(spread_freq, 2 * vco->ref_clk_rate, &rem);
+	frac_n_value = div_s64((s64)rem * 65536, 2 * vco->ref_clk_rate);
+
+	vco_calc->ssc.freq_seed_7_0 = frac_n_value & 0xff;
+	vco_calc->ssc.freq_seed_15_8 = (frac_n_value >> 8) & 0xff;
+}
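+
+/*
+ * SSC example, continuing the 500 MHz case above (illustrative, with
+ * ssc_freq = 30 kHz, ssc_ppm = 5000, down-spread): kdiv = 19 - 1 = 18,
+ * triang_steps = round(19200000 / (30000 * 19)) = 34 and
+ * ppm_freq = 500 MHz * 5000 / 10^6 = 2.5 MHz, from which the triangle
+ * increment and the dc_offset/freq_seed pair are derived as above.
+ */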
+
+static void pll_28nm_vco_config(struct dsi_pll_vco_clk *vco,
+		struct mdss_dsi_vco_calc *vco_calc)
+{
+	struct mdss_pll_resources *rsc = vco->priv;
+	void __iomem *pll_base = rsc->pll_base;
+	u32 vco_delay_us = rsc->vco_delay;
+	bool ssc_en = rsc->ssc_en;
+
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG,
+		vco_calc->lpfr_lut_res);
+
+	/* Loop filter capacitance values: c1 and c2 */
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15);
+
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
+
+	if (!ssc_en) {
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
+			(u32)(vco_calc->sdm_cfg1 & 0xff));
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
+			(u32)(vco_calc->sdm_cfg2 & 0xff));
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
+			(u32)(vco_calc->sdm_cfg3 & 0xff));
+	} else {
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1,
+			(u32)vco_calc->ssc.dc_offset);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2,
+			(u32)vco_calc->ssc.freq_seed_7_0);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3,
+			(u32)vco_calc->ssc.freq_seed_15_8);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG0,
+			(u32)vco_calc->ssc.kdiv);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG1,
+			(u32)vco_calc->ssc.triang_inc_7_0);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG2,
+			(u32)vco_calc->ssc.triang_inc_9_8);
+		MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SSC_CFG3,
+			(u32)vco_calc->ssc.triang_steps);
+	}
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
+
+	/* Add hardware recommended delay for correct PLL configuration */
+	if (vco_delay_us)
+		udelay(vco_delay_us);
+
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG,
+		(u32)vco_calc->refclk_cfg);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0,
+		(u32)vco_calc->sdm_cfg0);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00);
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10,
+		(u32)(vco_calc->cal_cfg10 & 0xff));
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11,
+		(u32)(vco_calc->cal_cfg11 & 0xff));
+	MDSS_PLL_REG_W(pll_base, DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20);
+	MDSS_PLL_REG_W(pll_base,
+		DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, 0x3); /* Fixed div-4 */
+}
+
+static int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
+{
+	struct mdss_dsi_vco_calc vco_calc = {0};
+	int rc = 0;
+
+	rc = pll_28nm_vco_rate_calc(vco, &vco_calc, rate);
+	if (rc) {
+		pr_err("vco rate calculation failed\n");
+		return rc;
+	}
+
+	pll_28nm_ssc_param_calc(vco, &vco_calc);
+	pll_28nm_vco_config(vco, &vco_calc);
+
+	return 0;
+}
+
+static unsigned long vco_get_rate(struct dsi_pll_vco_clk *vco)
+{
+	struct mdss_pll_resources *rsc = vco->priv;
+	int rc;
+	u32 sdm0, doubler, sdm_byp_div;
+	u64 vco_rate;
+	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+	u64 ref_clk = vco->ref_clk_rate;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* Check to see if the ref clk doubler is enabled */
+	doubler = MDSS_PLL_REG_R(rsc->pll_base,
+				 DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0);
+	ref_clk += (doubler * vco->ref_clk_rate);
+
+	/* see if it is integer mode or sdm mode */
+	sdm0 = MDSS_PLL_REG_R(rsc->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0);
+	if (sdm0 & BIT(6)) {
+		/* integer mode */
+		sdm_byp_div = (MDSS_PLL_REG_R(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1;
+		vco_rate = ref_clk * sdm_byp_div;
+	} else {
+		/* sdm mode */
+		sdm_dc_off = MDSS_PLL_REG_R(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF;
+		pr_debug("sdm_dc_off = %d\n", sdm_dc_off);
+		sdm2 = MDSS_PLL_REG_R(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF;
+		sdm3 = MDSS_PLL_REG_R(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF;
+		sdm_freq_seed = (sdm3 << 8) | sdm2;
+		pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed);
+
+		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+		pr_debug("vco rate = %lld\n", vco_rate);
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return (unsigned long)vco_rate;
+}
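+
+/*
+ * Readback round-trip for the example above: with the doubler on,
+ * ref_clk = 38.4 MHz, and sdm_dc_off = 12 with sdm_freq_seed = 1365
+ * give vco_rate = 38400000 * 13 + 38400000 * 1365 / 65536, i.e.
+ * ~499999804 Hz: the programmed 500 MHz less a small fractional
+ * quantization error.
+ */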
+
+static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
+{
+	int i, rc;
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable dsi pll(%d) resources\n",
+				rsc->index);
+		return rc;
+	}
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](rsc);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+
+	if (rc) {
+		mdss_pll_resource_enable(rsc, false);
+		pr_err("DSI PLL failed to lock\n");
+		return rc;
+	}
+
+	rsc->pll_on = true;
+
+	return rc;
+}
+
+static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
+{
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc->pll_on &&
+		mdss_pll_resource_enable(rsc, true)) {
+		pr_err("failed to enable dsi pll(%d) resources\n",
+				rsc->index);
+		return;
+	}
+
+	rsc->handoff_resources = false;
+
+	MDSS_PLL_REG_W(rsc->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00);
+
+	mdss_pll_resource_enable(rsc, false);
+	rsc->pll_on = false;
+
+	pr_debug("DSI PLL Disabled\n");
+}
+
+int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+	int rc;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	if (rsc->pll_on)
+		return 0;
+
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	/*
+	 * DSI PLL software reset. Add HW recommended delays after toggling
+	 * the software reset bit off and back on.
+	 */
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x01);
+	udelay(1000);
+	MDSS_PLL_REG_W(rsc->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG, 0x00);
+	udelay(1000);
+
+	rc = vco_set_rate(vco, rate);
+	rsc->vco_current_rate = rate;
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	*parent_rate = rrate;
+
+	return rrate;
+}
+
+unsigned long vco_28nm_recalc_rate(struct clk_hw *hw,
+					unsigned long parent_rate)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+	int rc;
+	u64 vco_rate = 0;
+
+	if (!rsc) {
+		pr_err("dsi pll resources not available\n");
+		return 0;
+	}
+
+	if (rsc->vco_current_rate)
+		return (unsigned long)rsc->vco_current_rate;
+
+	if (is_gdsc_disabled(rsc))
+		return 0;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable dsi pll(%d) resources\n",
+				rsc->index);
+		return 0;
+	}
+
+	if (dsi_pll_lock_status(rsc)) {
+		rsc->handoff_resources = true;
+		rsc->pll_on = true;
+		vco_rate = vco_get_rate(vco);
+	} else {
+		mdss_pll_resource_enable(rsc, false);
+	}
+
+	return (unsigned long)vco_rate;
+}
+
+int vco_28nm_prepare(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc) {
+		pr_err("dsi pll resources not available\n");
+		return -EINVAL;
+	}
+
+	if ((rsc->vco_cached_rate != 0)
+	    && (rsc->vco_cached_rate == clk_hw_get_rate(hw))) {
+		rc = hw->init->ops->set_rate(hw, rsc->vco_cached_rate,
+				rsc->vco_cached_rate);
+		if (rc) {
+			pr_err("pll(%d) set_rate failed, rc=%d\n",
+					rsc->index, rc);
+			goto error;
+		}
+
+		MDSS_PLL_REG_W(rsc->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG,
+				rsc->cached_postdiv1);
+		MDSS_PLL_REG_W(rsc->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG,
+				rsc->cached_postdiv3);
+		MDSS_PLL_REG_W(rsc->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG,
+				rsc->cached_vreg_cfg);
+	}
+
+	rc = dsi_pll_enable(vco);
+
+error:
+	return rc;
+}
+
+void vco_28nm_unprepare(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc) {
+		pr_err("dsi pll resources not available\n");
+		return;
+	}
+
+	rsc->cached_postdiv1 = MDSS_PLL_REG_R(rsc->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG);
+	rsc->cached_postdiv3 = MDSS_PLL_REG_R(rsc->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG);
+	rsc->cached_vreg_cfg = MDSS_PLL_REG_R(rsc->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG);
+
+	rsc->vco_cached_rate = clk_hw_get_rate(hw);
+
+	dsi_pll_disable(vco);
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h
new file mode 100644
index 0000000..251e301
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-28nm.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_DSI_PLL_28NM_H
+#define __MDSS_DSI_PLL_28NM_H
+
+#define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG          (0x0020)
+#define DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2       (0x0064)
+#define DSI_PHY_PLL_UNIPHY_PLL_TEST_CFG         (0x0068)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG1         (0x0070)
+
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG	(0x0004)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG	(0x0028)
+#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG		(0x0010)
+
+struct ssc_params {
+	s32 kdiv;
+	s64 triang_inc_7_0;
+	s64 triang_inc_9_8;
+	s64 triang_steps;
+	s64 dc_offset;
+	s64 freq_seed_7_0;
+	s64 freq_seed_15_8;
+};
+
+struct mdss_dsi_vco_calc {
+	s64 sdm_cfg0;
+	s64 sdm_cfg1;
+	s64 sdm_cfg2;
+	s64 sdm_cfg3;
+	s64 cal_cfg10;
+	s64 cal_cfg11;
+	s64 refclk_cfg;
+	s64 gen_vco_clk;
+	u32 lpfr_lut_res;
+	struct ssc_params ssc;
+};
+
+unsigned long vco_28nm_recalc_rate(struct clk_hw *hw,
+				unsigned long parent_rate);
+int vco_28nm_set_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long parent_rate);
+long vco_28nm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate);
+int vco_28nm_prepare(struct clk_hw *hw);
+void vco_28nm_unprepare(struct clk_hw *hw);
+
+int analog_postdiv_reg_write(void *context,
+				unsigned int reg, unsigned int div);
+int analog_postdiv_reg_read(void *context,
+				unsigned int reg, unsigned int *div);
+int byteclk_mux_write_sel(void *context,
+				unsigned int reg, unsigned int val);
+int byteclk_mux_read_sel(void *context,
+				unsigned int reg, unsigned int *val);
+int pixel_clk_set_div(void *context,
+				unsigned int reg, unsigned int div);
+int pixel_clk_get_div(void *context,
+				unsigned int reg, unsigned int *div);
+
+int dsi_pll_lock_status(struct mdss_pll_resources *rsc);
+#endif /* __MDSS_DSI_PLL_28NM_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c
new file mode 100644
index 0000000..fd014eb
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-7nm.c
@@ -0,0 +1,1846 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include "mdss-dsi-pll.h"
+#include "mdss-pll.h"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
+
+#define VCO_DELAY_USEC 1
+
+#define MHZ_250		250000000UL
+#define MHZ_500		500000000UL
+#define MHZ_1000	1000000000UL
+#define MHZ_1100	1100000000UL
+#define MHZ_1900	1900000000UL
+#define MHZ_3000	3000000000UL
+
+/* Register Offsets from PLL base address */
+#define PLL_ANALOG_CONTROLS_ONE			0x0000
+#define PLL_ANALOG_CONTROLS_TWO			0x0004
+#define PLL_INT_LOOP_SETTINGS			0x0008
+#define PLL_INT_LOOP_SETTINGS_TWO		0x000C
+#define PLL_ANALOG_CONTROLS_THREE		0x0010
+#define PLL_ANALOG_CONTROLS_FOUR		0x0014
+#define PLL_ANALOG_CONTROLS_FIVE		0x0018
+#define PLL_INT_LOOP_CONTROLS			0x001C
+#define PLL_DSM_DIVIDER				0x0020
+#define PLL_FEEDBACK_DIVIDER			0x0024
+#define PLL_SYSTEM_MUXES			0x0028
+#define PLL_FREQ_UPDATE_CONTROL_OVERRIDES	0x002C
+#define PLL_CMODE				0x0030
+#define PLL_PSM_CTRL				0x0034
+#define PLL_RSM_CTRL				0x0038
+#define PLL_VCO_TUNE_MAP			0x003C
+#define PLL_PLL_CNTRL				0x0040
+#define PLL_CALIBRATION_SETTINGS		0x0044
+#define PLL_BAND_SEL_CAL_TIMER_LOW		0x0048
+#define PLL_BAND_SEL_CAL_TIMER_HIGH		0x004C
+#define PLL_BAND_SEL_CAL_SETTINGS		0x0050
+#define PLL_BAND_SEL_MIN			0x0054
+#define PLL_BAND_SEL_MAX			0x0058
+#define PLL_BAND_SEL_PFILT			0x005C
+#define PLL_BAND_SEL_IFILT			0x0060
+#define PLL_BAND_SEL_CAL_SETTINGS_TWO		0x0064
+#define PLL_BAND_SEL_CAL_SETTINGS_THREE		0x0068
+#define PLL_BAND_SEL_CAL_SETTINGS_FOUR		0x006C
+#define PLL_BAND_SEL_ICODE_HIGH			0x0070
+#define PLL_BAND_SEL_ICODE_LOW			0x0074
+#define PLL_FREQ_DETECT_SETTINGS_ONE		0x0078
+#define PLL_FREQ_DETECT_THRESH			0x007C
+#define PLL_FREQ_DET_REFCLK_HIGH		0x0080
+#define PLL_FREQ_DET_REFCLK_LOW			0x0084
+#define PLL_FREQ_DET_PLLCLK_HIGH		0x0088
+#define PLL_FREQ_DET_PLLCLK_LOW			0x008C
+#define PLL_PFILT				0x0090
+#define PLL_IFILT				0x0094
+#define PLL_PLL_GAIN				0x0098
+#define PLL_ICODE_LOW				0x009C
+#define PLL_ICODE_HIGH				0x00A0
+#define PLL_LOCKDET				0x00A4
+#define PLL_OUTDIV				0x00A8
+#define PLL_FASTLOCK_CONTROL			0x00AC
+#define PLL_PASS_OUT_OVERRIDE_ONE		0x00B0
+#define PLL_PASS_OUT_OVERRIDE_TWO		0x00B4
+#define PLL_CORE_OVERRIDE			0x00B8
+#define PLL_CORE_INPUT_OVERRIDE			0x00BC
+#define PLL_RATE_CHANGE				0x00C0
+#define PLL_PLL_DIGITAL_TIMERS			0x00C4
+#define PLL_PLL_DIGITAL_TIMERS_TWO		0x00C8
+#define PLL_DECIMAL_DIV_START			0x00CC
+#define PLL_FRAC_DIV_START_LOW			0x00D0
+#define PLL_FRAC_DIV_START_MID			0x00D4
+#define PLL_FRAC_DIV_START_HIGH			0x00D8
+#define PLL_DEC_FRAC_MUXES			0x00DC
+#define PLL_DECIMAL_DIV_START_1			0x00E0
+#define PLL_FRAC_DIV_START_LOW_1		0x00E4
+#define PLL_FRAC_DIV_START_MID_1		0x00E8
+#define PLL_FRAC_DIV_START_HIGH_1		0x00EC
+#define PLL_DECIMAL_DIV_START_2			0x00F0
+#define PLL_FRAC_DIV_START_LOW_2		0x00F4
+#define PLL_FRAC_DIV_START_MID_2		0x00F8
+#define PLL_FRAC_DIV_START_HIGH_2		0x00FC
+#define PLL_MASH_CONTROL			0x0100
+#define PLL_SSC_STEPSIZE_LOW			0x0104
+#define PLL_SSC_STEPSIZE_HIGH			0x0108
+#define PLL_SSC_DIV_PER_LOW			0x010C
+#define PLL_SSC_DIV_PER_HIGH			0x0110
+#define PLL_SSC_ADJPER_LOW			0x0114
+#define PLL_SSC_ADJPER_HIGH			0x0118
+#define PLL_SSC_MUX_CONTROL			0x011C
+#define PLL_SSC_STEPSIZE_LOW_1			0x0120
+#define PLL_SSC_STEPSIZE_HIGH_1			0x0124
+#define PLL_SSC_DIV_PER_LOW_1			0x0128
+#define PLL_SSC_DIV_PER_HIGH_1			0x012C
+#define PLL_SSC_ADJPER_LOW_1			0x0130
+#define PLL_SSC_ADJPER_HIGH_1			0x0134
+#define PLL_SSC_STEPSIZE_LOW_2			0x0138
+#define PLL_SSC_STEPSIZE_HIGH_2			0x013C
+#define PLL_SSC_DIV_PER_LOW_2			0x0140
+#define PLL_SSC_DIV_PER_HIGH_2			0x0144
+#define PLL_SSC_ADJPER_LOW_2			0x0148
+#define PLL_SSC_ADJPER_HIGH_2			0x014C
+#define PLL_SSC_CONTROL				0x0150
+#define PLL_PLL_OUTDIV_RATE			0x0154
+#define PLL_PLL_LOCKDET_RATE_1			0x0158
+#define PLL_PLL_LOCKDET_RATE_2			0x015C
+#define PLL_PLL_PROP_GAIN_RATE_1		0x0160
+#define PLL_PLL_PROP_GAIN_RATE_2		0x0164
+#define PLL_PLL_BAND_SEL_RATE_1			0x0168
+#define PLL_PLL_BAND_SEL_RATE_2			0x016C
+#define PLL_PLL_INT_GAIN_IFILT_BAND_1		0x0170
+#define PLL_PLL_INT_GAIN_IFILT_BAND_2		0x0174
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_1	0x0178
+#define PLL_PLL_FL_INT_GAIN_PFILT_BAND_2	0x017C
+#define PLL_PLL_FASTLOCK_EN_BAND		0x0180
+#define PLL_FREQ_TUNE_ACCUM_INIT_MID		0x0184
+#define PLL_FREQ_TUNE_ACCUM_INIT_HIGH		0x0188
+#define PLL_FREQ_TUNE_ACCUM_INIT_MUX		0x018C
+#define PLL_PLL_LOCK_OVERRIDE			0x0190
+#define PLL_PLL_LOCK_DELAY			0x0194
+#define PLL_PLL_LOCK_MIN_DELAY			0x0198
+#define PLL_CLOCK_INVERTERS			0x019C
+#define PLL_SPARE_AND_JPC_OVERRIDES		0x01A0
+#define PLL_BIAS_CONTROL_1			0x01A4
+#define PLL_BIAS_CONTROL_2			0x01A8
+#define PLL_ALOG_OBSV_BUS_CTRL_1		0x01AC
+#define PLL_COMMON_STATUS_ONE			0x01B0
+#define PLL_COMMON_STATUS_TWO			0x01B4
+#define PLL_BAND_SEL_CAL			0x01B8
+#define PLL_ICODE_ACCUM_STATUS_LOW		0x01BC
+#define PLL_ICODE_ACCUM_STATUS_HIGH		0x01C0
+#define PLL_FD_OUT_LOW				0x01C4
+#define PLL_FD_OUT_HIGH				0x01C8
+#define PLL_ALOG_OBSV_BUS_STATUS_1		0x01CC
+#define PLL_PLL_MISC_CONFIG			0x01D0
+#define PLL_FLL_CONFIG				0x01D4
+#define PLL_FLL_FREQ_ACQ_TIME			0x01D8
+#define PLL_FLL_CODE0				0x01DC
+#define PLL_FLL_CODE1				0x01E0
+#define PLL_FLL_GAIN0				0x01E4
+#define PLL_FLL_GAIN1				0x01E8
+#define PLL_SW_RESET				0x01EC
+#define PLL_FAST_PWRUP				0x01F0
+#define PLL_LOCKTIME0				0x01F4
+#define PLL_LOCKTIME1				0x01F8
+#define PLL_DEBUG_BUS_SEL			0x01FC
+#define PLL_DEBUG_BUS0				0x0200
+#define PLL_DEBUG_BUS1				0x0204
+#define PLL_DEBUG_BUS2				0x0208
+#define PLL_DEBUG_BUS3				0x020C
+#define PLL_ANALOG_FLL_CONTROL_OVERRIDES	0x0210
+#define PLL_VCO_CONFIG				0x0214
+#define PLL_VCO_CAL_CODE1_MODE0_STATUS		0x0218
+#define PLL_VCO_CAL_CODE1_MODE1_STATUS		0x021C
+#define PLL_RESET_SM_STATUS			0x0220
+#define PLL_TDC_OFFSET				0x0224
+#define PLL_PS3_PWRDOWN_CONTROLS		0x0228
+#define PLL_PS4_PWRDOWN_CONTROLS		0x022C
+#define PLL_PLL_RST_CONTROLS			0x0230
+#define PLL_GEAR_BAND_SELECT_CONTROLS		0x0234
+#define PLL_PSM_CLK_CONTROLS			0x0238
+#define PLL_SYSTEM_MUXES_2			0x023C
+#define PLL_VCO_CONFIG_1			0x0240
+#define PLL_VCO_CONFIG_2			0x0244
+#define PLL_CLOCK_INVERTERS_1			0x0248
+#define PLL_CLOCK_INVERTERS_2			0x024C
+#define PLL_CMODE_1				0x0250
+#define PLL_CMODE_2				0x0254
+#define PLL_ANALOG_CONTROLS_FIVE_1		0x0258
+#define PLL_ANALOG_CONTROLS_FIVE_2		0x025C
+
+/* Register Offsets from PHY base address */
+#define PHY_CMN_CLK_CFG0	0x010
+#define PHY_CMN_CLK_CFG1	0x014
+#define PHY_CMN_RBUF_CTRL	0x01C
+#define PHY_CMN_CTRL_0		0x024
+#define PHY_CMN_CTRL_3		0x030
+#define PHY_CMN_PLL_CNTRL	0x03C
+#define PHY_CMN_GLBL_DIGTOP_SPARE4 0x128
+
+/* Bit definition of SSC control registers */
+#define SSC_CENTER		BIT(0)
+#define SSC_EN			BIT(1)
+#define SSC_FREQ_UPDATE		BIT(2)
+#define SSC_FREQ_UPDATE_MUX	BIT(3)
+#define SSC_UPDATE_SSC		BIT(4)
+#define SSC_UPDATE_SSC_MUX	BIT(5)
+#define SSC_START		BIT(6)
+#define SSC_START_MUX		BIT(7)
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_MAX
+};
+
+struct dsi_pll_regs {
+	u32 pll_prop_gain_rate;
+	u32 pll_lockdet_rate;
+	u32 decimal_div_start;
+	u32 frac_div_start_low;
+	u32 frac_div_start_mid;
+	u32 frac_div_start_high;
+	u32 pll_clock_inverters;
+	u32 ssc_stepsize_low;
+	u32 ssc_stepsize_high;
+	u32 ssc_div_per_low;
+	u32 ssc_div_per_high;
+	u32 ssc_adjper_low;
+	u32 ssc_adjper_high;
+	u32 ssc_control;
+};
+
+struct dsi_pll_config {
+	u32 ref_freq;
+	bool div_override;
+	u32 output_div;
+	bool ignore_frac;
+	bool disable_prescaler;
+	bool enable_ssc;
+	bool ssc_center;
+	u32 dec_bits;
+	u32 frac_bits;
+	u32 lock_timer;
+	u32 ssc_freq;
+	u32 ssc_offset;
+	u32 ssc_adj_per;
+	u32 thresh_cycles;
+	u32 refclk_cycles;
+};
+
+struct dsi_pll_7nm {
+	struct mdss_pll_resources *rsc;
+	struct dsi_pll_config pll_configuration;
+	struct dsi_pll_regs reg_setup;
+};
+
+static inline int pll_reg_read(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	u32 data;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * Both the DSI PHY and PLL must be powered on when reading PLL
+	 * registers. Since PHY power has already been enabled by the DSI
+	 * PHY driver, only PLL power needs to be enabled here.
+	 */
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
+	ndelay(250);
+
+	*val = MDSS_PLL_REG_R(rsc->pll_base, reg);
+
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int pll_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(rsc->pll_base, reg, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_read(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	*val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_write(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(rsc->phy_base, reg, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int phy_reg_update_bits_sub(struct mdss_pll_resources *rsc,
+		unsigned int reg, unsigned int mask, unsigned int val)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	reg_val &= ~mask;
+	reg_val |= (val & mask);
+	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+	return 0;
+}
+
+static inline int phy_reg_update_bits(void *context, unsigned int reg,
+				unsigned int mask, unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = phy_reg_update_bits_sub(rsc, reg, mask, val);
+	if (!rc && rsc->slave)
+		rc = phy_reg_update_bits_sub(rsc->slave, reg, mask, val);
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static inline int pclk_mux_read_sel(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc)
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+	else
+		*val = (MDSS_PLL_REG_R(rsc->phy_base, reg) & 0x3);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+	return rc;
+}
+
+static inline int pclk_mux_write_sel_sub(struct mdss_pll_resources *rsc,
+				unsigned int reg, unsigned int val)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, reg);
+	reg_val &= ~0x03;
+	reg_val |= val;
+
+	MDSS_PLL_REG_W(rsc->phy_base, reg, reg_val);
+
+	return 0;
+}
+
+static inline int pclk_mux_write_sel(void *context, unsigned int reg,
+					unsigned int val)
+{
+	int rc = 0;
+	struct mdss_pll_resources *rsc = context;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	rc = pclk_mux_write_sel_sub(rsc, reg, val);
+	if (!rc && rsc->slave)
+		rc = pclk_mux_write_sel_sub(rsc->slave, reg, val);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	/*
+	 * Cache the current parent index for cases where the parent is
+	 * not changing but the rate is. In that case the clock framework
+	 * won't call set_parent and hence the dsiclk_sel bits won't be
+	 * programmed, e.g. in the dfps update use case.
+	 */
+	rsc->cached_cfg1 = val;
+
+	return rc;
+}
+
+static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
+static struct dsi_pll_7nm plls[DSI_PLL_MAX];
+
+static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
+{
+	u32 reg;
+	struct mdss_pll_resources *orsc = pll_rsc_db[DSI_PLL_1];
+
+	if (!rsc)
+		return;
+
+	/* Only DSI PLL0 can act as a master */
+	if (rsc->index != DSI_PLL_0)
+		return;
+
+	/* default configuration: source is either internal or ref clock */
+	rsc->slave = NULL;
+
+	if (!orsc) {
+		pr_warn("slave PLL unavilable, assuming standalone config\n");
+		return;
+	}
+
+	/* check to see if the source of DSI1 PLL bitclk is set to external */
+	reg = MDSS_PLL_REG_R(orsc->phy_base, PHY_CMN_CLK_CFG1);
+	reg &= (BIT(2) | BIT(3));
+	if (reg == 0x04)
+		rsc->slave = pll_rsc_db[DSI_PLL_1]; /* external source */
+
+	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
+}
+
+static void dsi_pll_setup_config(struct dsi_pll_7nm *pll,
+				 struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+
+	config->ref_freq = 19200000;
+	config->output_div = 1;
+	config->dec_bits = 8;
+	config->frac_bits = 18;
+	config->lock_timer = 64;
+	config->ssc_freq = 31500;
+	config->ssc_offset = 5000;
+	config->ssc_adj_per = 2;
+	config->thresh_cycles = 32;
+	config->refclk_cycles = 256;
+
+	config->div_override = false;
+	config->ignore_frac = false;
+	config->disable_prescaler = false;
+	config->enable_ssc = rsc->ssc_en;
+	config->ssc_center = rsc->ssc_center;
+
+	if (config->enable_ssc) {
+		if (rsc->ssc_freq)
+			config->ssc_freq = rsc->ssc_freq;
+		if (rsc->ssc_ppm)
+			config->ssc_offset = rsc->ssc_ppm;
+	}
+
+	dsi_pll_config_slave(rsc);
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll,
+				  struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u64 fref = rsc->vco_ref_clk_rate;
+	u64 pll_freq;
+	u64 divider;
+	u64 dec, dec_multiple;
+	u32 frac;
+	u64 multiplier;
+
+	pll_freq = rsc->vco_current_rate;
+
+	if (config->disable_prescaler)
+		divider = fref;
+	else
+		divider = fref * 2;
+
+	multiplier = 1 << config->frac_bits;
+	dec_multiple = div_u64(pll_freq * multiplier, divider);
+	div_u64_rem(dec_multiple, multiplier, &frac);
+
+	dec = div_u64(dec_multiple, multiplier);
+
+	regs->pll_clock_inverters = 0;
+	regs->pll_lockdet_rate = config->lock_timer;
+	regs->decimal_div_start = dec;
+	regs->frac_div_start_low = (frac & 0xff);
+	regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+	regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
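+
+/*
+ * Worked example for dsi_pll_calc_dec_frac() above (hypothetical rate,
+ * assuming the 19.2 MHz reference and the enabled prescaler set up in
+ * dsi_pll_setup_config()):
+ *
+ *   vco rate = 1500 MHz, divider = 2 * 19.2 MHz = 38.4 MHz
+ *   ratio    = 1500 MHz / 38.4 MHz = 39.0625
+ *   dec      = 39 (0x27) -> PLL_DECIMAL_DIV_START_1
+ *   frac     = 0.0625 * 2^18 = 16384 (0x04000)
+ *   frac_low = 0x00, frac_mid = 0x40, frac_high = 0x0
+ */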
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll,
+		  struct mdss_pll_resources *rsc)
+{
+	struct dsi_pll_config *config = &pll->pll_configuration;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+	u32 ssc_per;
+	u32 ssc_mod;
+	u64 ssc_step_size;
+	u64 frac;
+
+	if (!config->enable_ssc) {
+		pr_debug("SSC not enabled\n");
+		return;
+	}
+
+	ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+	ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+	ssc_per -= ssc_mod;
+
+	frac = regs->frac_div_start_low |
+			(regs->frac_div_start_mid << 8) |
+			(regs->frac_div_start_high << 16);
+	ssc_step_size = regs->decimal_div_start;
+	ssc_step_size *= (1 << config->frac_bits);
+	ssc_step_size += frac;
+	ssc_step_size *= config->ssc_offset;
+	ssc_step_size *= (config->ssc_adj_per + 1);
+	ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+	ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+	regs->ssc_div_per_low = ssc_per & 0xFF;
+	regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+	regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+	regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+	regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+	regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+	regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+	pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+			regs->decimal_div_start, frac, config->frac_bits);
+	pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
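+
+/*
+ * Worked example for dsi_pll_calc_ssc() above, continuing the
+ * hypothetical 1500 MHz rate (dec = 39, frac = 16384) with the default
+ * config (ssc_freq = 31.5 kHz, ssc_offset = 5000 ppm, ssc_adj_per = 2):
+ *
+ *   ssc_per       = round(19.2 MHz / 31.5 kHz) / 2 - 1 = 304, reduced
+ *                   by ssc_mod = 305 % 3 = 2 to 302 (0x12E)
+ *   ssc_step_size = (39 * 2^18 + 16384) * 5000 * 3 / 303 / 10^6
+ *                 ~= 507 (0x1FB)
+ */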
+
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll,
+		struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+	struct dsi_pll_regs *regs = &pll->reg_setup;
+
+	if (pll->pll_configuration.enable_ssc) {
+		pr_debug("SSC is enabled\n");
+
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1,
+				regs->ssc_stepsize_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1,
+				regs->ssc_stepsize_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1,
+				regs->ssc_div_per_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1,
+				regs->ssc_div_per_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_1,
+				regs->ssc_adjper_low);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_1,
+				regs->ssc_adjper_high);
+		MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL,
+				SSC_EN | regs->ssc_control);
+	}
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll,
+				  struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE_1, 0x01);
+	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_GEAR_BAND_SELECT_CONTROLS, 0x21);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE, 0x01);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_TWO, 0x03);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_THREE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_DSM_DIVIDER, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_FEEDBACK_DIVIDER, 0x4e);
+	MDSS_PLL_REG_W(pll_base, PLL_CALIBRATION_SETTINGS, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+	MDSS_PLL_REG_W(pll_base, PLL_OUTDIV, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
+	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x29);
+	MDSS_PLL_REG_W(pll_base, PLL_PFILT, 0x2f);
+	MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x2a);
+	MDSS_PLL_REG_W(pll_base, PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_init_val(struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_ONE, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS, 0x0000003F);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_SETTINGS_TWO, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FOUR, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_INT_LOOP_CONTROLS, 0x00000080);
+	MDSS_PLL_REG_W(pll_base, PLL_SYSTEM_MUXES, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_UPDATE_CONTROL_OVERRIDES, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_CMODE, 0x00000010);
+	MDSS_PLL_REG_W(pll_base, PLL_PSM_CTRL, 0x00000020);
+	MDSS_PLL_REG_W(pll_base, PLL_RSM_CTRL, 0x00000010);
+	MDSS_PLL_REG_W(pll_base, PLL_VCO_TUNE_MAP, 0x00000002);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_CNTRL, 0x0000001C);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_TIMER_HIGH, 0x00000002);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS, 0x00000020);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MIN, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_MAX, 0x000000FF);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_PFILT, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_IFILT, 0x0000000A);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_TWO, 0x00000025);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_THREE, 0x000000BA);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL_SETTINGS_FOUR, 0x0000004F);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_HIGH, 0x0000000A);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_ICODE_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_SETTINGS_ONE, 0x0000000C);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DETECT_THRESH, 0x00000020);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_REFCLK_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_REFCLK_LOW, 0x000000FF);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_PLLCLK_HIGH, 0x00000010);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_DET_PLLCLK_LOW, 0x00000046);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_GAIN, 0x00000054);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_LOCKDET, 0x00000040);
+	MDSS_PLL_REG_W(pll_base, PLL_FASTLOCK_CONTROL, 0x00000004);
+	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_ONE, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PASS_OUT_OVERRIDE_TWO, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_OVERRIDE, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x00000010);
+	MDSS_PLL_REG_W(pll_base, PLL_RATE_CHANGE, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS, 0x00000008);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_DIGITAL_TIMERS_TWO, 0x00000008);
+	MDSS_PLL_REG_W(pll_base, PLL_DEC_FRAC_MUXES, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_MASH_CONTROL, 0x00000003);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_MUX_CONTROL, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_LOW_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_STEPSIZE_HIGH_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_LOW_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_DIV_PER_HIGH_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_LOW_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_ADJPER_HIGH_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SSC_CONTROL, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_OUTDIV_RATE, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x00000040);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_2, 0x00000040);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_1, 0x0000000C);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_PROP_GAIN_RATE_2, 0x0000000A);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_1, 0x000000C0);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_BAND_SEL_RATE_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x00000054);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_INT_GAIN_IFILT_BAND_2, 0x00000054);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x0000004C);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_FL_INT_GAIN_PFILT_BAND_2, 0x0000004C);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_FASTLOCK_EN_BAND, 0x00000003);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MID, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FREQ_TUNE_ACCUM_INIT_MUX, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x00000080);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x00000006);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_MIN_DELAY, 0x00000019);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SPARE_AND_JPC_OVERRIDES, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_1, 0x00000040);
+	MDSS_PLL_REG_W(pll_base, PLL_BIAS_CONTROL_2, 0x00000020);
+	MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_CTRL_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_COMMON_STATUS_ONE, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_COMMON_STATUS_TWO, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_BAND_SEL_CAL, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_ACCUM_STATUS_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_ICODE_ACCUM_STATUS_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FD_OUT_LOW, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FD_OUT_HIGH, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_ALOG_OBSV_BUS_STATUS_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_MISC_CONFIG, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FLL_CONFIG, 0x00000002);
+	MDSS_PLL_REG_W(pll_base, PLL_FLL_FREQ_ACQ_TIME, 0x00000011);
+	MDSS_PLL_REG_W(pll_base, PLL_FLL_CODE0, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FLL_CODE1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FLL_GAIN0, 0x00000080);
+	MDSS_PLL_REG_W(pll_base, PLL_FLL_GAIN1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_SW_RESET, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_FAST_PWRUP, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_LOCKTIME0, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_LOCKTIME1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS_SEL, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS0, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_DEBUG_BUS3, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_FLL_CONTROL_OVERRIDES, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_VCO_CAL_CODE1_MODE0_STATUS, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_VCO_CAL_CODE1_MODE1_STATUS, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_RESET_SM_STATUS, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_TDC_OFFSET, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_PS3_PWRDOWN_CONTROLS, 0x0000001D);
+	MDSS_PLL_REG_W(pll_base, PLL_PS4_PWRDOWN_CONTROLS, 0x0000001C);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_RST_CONTROLS, 0x000000FF);
+	MDSS_PLL_REG_W(pll_base, PLL_GEAR_BAND_SELECT_CONTROLS, 0x00000022);
+	MDSS_PLL_REG_W(pll_base, PLL_PSM_CLK_CONTROLS, 0x00000009);
+	MDSS_PLL_REG_W(pll_base, PLL_SYSTEM_MUXES_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_1, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_VCO_CONFIG_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_1, 0x00000040);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_2, 0x00000000);
+	MDSS_PLL_REG_W(pll_base, PLL_CMODE_1, 0x00000010);
+	MDSS_PLL_REG_W(pll_base, PLL_CMODE_2, 0x00000010);
+	MDSS_PLL_REG_W(pll_base, PLL_ANALOG_CONTROLS_FIVE_2, 0x00000003);
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll,
+			   struct mdss_pll_resources *rsc)
+{
+	void __iomem *pll_base = rsc->pll_base;
+	struct dsi_pll_regs *reg = &pll->reg_setup;
+
+	MDSS_PLL_REG_W(pll_base, PLL_CORE_INPUT_OVERRIDE, 0x12);
+	MDSS_PLL_REG_W(pll_base, PLL_DECIMAL_DIV_START_1,
+		       reg->decimal_div_start);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_LOW_1,
+		       reg->frac_div_start_low);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_MID_1,
+		       reg->frac_div_start_mid);
+	MDSS_PLL_REG_W(pll_base, PLL_FRAC_DIV_START_HIGH_1,
+		       reg->frac_div_start_high);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCKDET_RATE_1, 0x40);
+	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_DELAY, 0x06);
+	MDSS_PLL_REG_W(pll_base, PLL_CMODE_1, 0x10);
+	MDSS_PLL_REG_W(pll_base, PLL_CLOCK_INVERTERS_1,
+			reg->pll_clock_inverters);
+}
+
+static int vco_7nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *rsc = vco->priv;
+	struct dsi_pll_7nm *pll;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	if (rsc->pll_on)
+		return 0;
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	pr_debug("ndx=%d, rate=%lu\n", rsc->index, rate);
+
+	rsc->vco_current_rate = rate;
+	rsc->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("failed to enable mdss dsi pll(%d), rc=%d\n",
+		       rsc->index, rc);
+		return rc;
+	}
+
+	dsi_pll_init_val(rsc);
+
+	dsi_pll_setup_config(pll, rsc);
+
+	dsi_pll_calc_dec_frac(pll, rsc);
+
+	dsi_pll_calc_ssc(pll, rsc);
+
+	dsi_pll_commit(pll, rsc);
+
+	dsi_pll_config_hzindep_reg(pll, rsc);
+
+	dsi_pll_ssc_commit(pll, rsc);
+
+	/* flush, ensure all register writes are done */
+	wmb();
+
+	mdss_pll_resource_enable(rsc, false);
+
+	return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct mdss_pll_resources *pll)
+{
+	int rc;
+	u32 status;
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+
+	rc = readl_poll_timeout_atomic(pll->pll_base + PLL_COMMON_STATUS_ONE,
+				       status,
+				       ((status & BIT(0)) > 0),
+				       delay_us,
+				       timeout_us);
+	if (rc && !pll->handoff_resources)
+		pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+			pll->index, status);
+
+	return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct mdss_pll_resources *rsc)
+{
+	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data & ~BIT(5));
+	ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct mdss_pll_resources *rsc)
+{
+	u32 data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CTRL_0);
+
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_0, data | BIT(5));
+	MDSS_PLL_REG_W(rsc->pll_base, PLL_SYSTEM_MUXES, 0xc0);
+	ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct mdss_pll_resources *rsc)
+{
+	u32 data;
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data & ~BIT(5)));
+}
+
+static void dsi_pll_enable_global_clk(struct mdss_pll_resources *rsc)
+{
+	u32 data;
+
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CTRL_3, 0x04);
+
+	data = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG1);
+
+	/* Turn on the clk_en_sel bit prior to toggling the resync FIFO */
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG1, (data | BIT(5) |
+								BIT(4)));
+}
+
+static void dsi_pll_phy_dig_reset(struct mdss_pll_resources *rsc)
+{
+	/*
+	 * Reset the PHY digital domain. This is needed when coming out
+	 * of a CX or analog rail power collapse while ensuring that the
+	 * pads maintain the LP00 or LP11 state.
+	 */
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+	wmb(); /* Ensure that the reset is asserted */
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+	wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_enable(struct dsi_pll_vco_clk *vco)
+{
+	int rc;
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	dsi_pll_enable_pll_bias(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_pll_bias(rsc->slave);
+
+	phy_reg_update_bits_sub(rsc, PHY_CMN_CLK_CFG1, 0x03, rsc->cached_cfg1);
+	if (rsc->slave)
+		phy_reg_update_bits_sub(rsc->slave, PHY_CMN_CLK_CFG1,
+				0x03, rsc->cached_cfg1);
+	wmb(); /* ensure dsiclk_sel is always programmed before pll start */
+
+	/* Start PLL */
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0x01);
+
+	/*
+	 * ensure all PLL configurations are written prior to checking
+	 * for PLL lock.
+	 */
+	wmb();
+
+	/* Check for PLL lock */
+	rc = dsi_pll_7nm_lock_status(rsc);
+	if (rc) {
+		pr_err("PLL(%d) lock failed\n", rsc->index);
+		goto error;
+	}
+
+	rsc->pll_on = true;
+
+	/*
+	 * Assert the power-on reset for the PHY digital domain in case
+	 * the PLL is enabled after a CX or analog domain power collapse.
+	 * This needs to be done before enabling the global clock.
+	 */
+	dsi_pll_phy_dig_reset(rsc);
+	if (rsc->slave)
+		dsi_pll_phy_dig_reset(rsc->slave);
+
+	dsi_pll_enable_global_clk(rsc);
+	if (rsc->slave)
+		dsi_pll_enable_global_clk(rsc->slave);
+
+error:
+	return rc;
+}
+
+static void dsi_pll_disable_sub(struct mdss_pll_resources *rsc)
+{
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_RBUF_CTRL, 0);
+	dsi_pll_disable_pll_bias(rsc);
+}
+
+static void dsi_pll_disable(struct dsi_pll_vco_clk *vco)
+{
+	struct mdss_pll_resources *rsc = vco->priv;
+
+	if (!rsc->pll_on &&
+	    mdss_pll_resource_enable(rsc, true)) {
+		pr_err("failed to enable pll (%d) resources\n", rsc->index);
+		return;
+	}
+
+	rsc->handoff_resources = false;
+
+	pr_debug("stop PLL (%d)\n", rsc->index);
+
+	/*
+	 * To avoid any stray glitches while abruptly powering down the
+	 * PLL, make sure to gate the clock using the clock enable bit
+	 * before powering down the PLL.
+	 */
+	dsi_pll_disable_global_clk(rsc);
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_PLL_CNTRL, 0);
+	dsi_pll_disable_sub(rsc);
+	if (rsc->slave) {
+		dsi_pll_disable_global_clk(rsc->slave);
+		dsi_pll_disable_sub(rsc->slave);
+	}
+	/* flush, ensure all register writes are done */
+	wmb();
+	rsc->pll_on = false;
+}
+
+long vco_7nm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
+{
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	*parent_rate = rrate;
+
+	return rrate;
+}
+
+static void vco_7nm_unprepare(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("dsi pll resources not available\n");
+		return;
+	}
+
+	/*
+	 * During unprepare in the continuous splash use case, we want the
+	 * driver to pick all dividers instead of retaining the bootloader
+	 * configuration.
+	 */
+	if (!pll->handoff_resources) {
+		pll->cached_cfg0 = MDSS_PLL_REG_R(pll->phy_base,
+							PHY_CMN_CLK_CFG0);
+		pll->cached_outdiv = MDSS_PLL_REG_R(pll->pll_base,
+							PLL_PLL_OUTDIV_RATE);
+		pr_debug("cfg0=%d,cfg1=%d, outdiv=%d\n", pll->cached_cfg0,
+					pll->cached_cfg1, pll->cached_outdiv);
+
+		pll->vco_cached_rate = clk_hw_get_rate(hw);
+	}
+
+	/*
+	 * When the continuous splash screen feature is enabled, we need to
+	 * cache the mux configuration for the pixel_clk_src mux clock. The
+	 * clock framework does not call back to reconfigure the mux value
+	 * if it does not change. For such use cases, we need to ensure that
+	 * the cached value is programmed prior to the PLL being locked.
+	 */
+	if (pll->handoff_resources)
+		pll->cached_cfg1 = MDSS_PLL_REG_R(pll->phy_base,
+							PHY_CMN_CLK_CFG1);
+
+	dsi_pll_disable(vco);
+	mdss_pll_resource_enable(pll, false);
+}
+
+static int vco_7nm_prepare(struct clk_hw *hw)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	/* Skip vco recalculation for continuous splash use case */
+	if (pll->handoff_resources)
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("failed to enable pll (%d) resource, rc=%d\n",
+		       pll->index, rc);
+		return rc;
+	}
+
+	if ((pll->vco_cached_rate != 0) &&
+	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+				pll->vco_cached_rate);
+		if (rc) {
+			pr_err("pll(%d) set_rate failed, rc=%d\n",
+			       pll->index, rc);
+			mdss_pll_resource_enable(pll, false);
+			return rc;
+		}
+		pr_debug("cfg0=%d, cfg1=%d\n", pll->cached_cfg0,
+			pll->cached_cfg1);
+		MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0,
+					pll->cached_cfg0);
+		MDSS_PLL_REG_W(pll->pll_base, PLL_PLL_OUTDIV_RATE,
+					pll->cached_outdiv);
+	}
+
+	rc = dsi_pll_enable(vco);
+	if (rc) {
+		mdss_pll_resource_enable(pll, false);
+		pr_err("pll(%d) enable failed, rc=%d\n", pll->index, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static unsigned long vco_7nm_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+	struct mdss_pll_resources *pll = vco->priv;
+	int rc;
+	u64 ref_clk = vco->ref_clk_rate;
+	u64 vco_rate = 0;
+	u64 multiplier;
+	u32 frac;
+	u32 dec;
+	u32 outdiv;
+	u64 pll_freq, tmp64;
+
+	if (!vco->priv) {
+		pr_err("vco priv is null\n");
+		return 0;
+	}
+
+	/*
+	 * Calculate the vco rate from HW registers only for handoff cases.
+	 * For other cases, where vco_7nm_set_rate() has already been
+	 * called, just return the rate that was set earlier. This is
+	 * because recalculating the VCO rate requires reading the correct
+	 * value of the pll_out_div divider clock, which is only set
+	 * afterwards.
+	 */
+	if (pll->vco_current_rate != 0) {
+		pr_debug("returning vco rate = %lld\n", pll->vco_current_rate);
+		return pll->vco_current_rate;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("failed to enable pll(%d) resource, rc=%d\n",
+		       pll->index, rc);
+		return 0;
+	}
+
+	pll->handoff_resources = true;
+	if (dsi_pll_7nm_lock_status(pll)) {
+		pr_debug("PLL not enabled\n");
+		pll->handoff_resources = false;
+		goto end;
+	}
+
+	dec = MDSS_PLL_REG_R(pll->pll_base, PLL_DECIMAL_DIV_START_1);
+	dec &= 0xFF;
+
+	frac = MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_LOW_1);
+	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_MID_1) &
+		  0xFF) <<
+		8);
+	frac |= ((MDSS_PLL_REG_R(pll->pll_base, PLL_FRAC_DIV_START_HIGH_1) &
+		  0x3) <<
+		16);
+
+	/* The OUTDIV[1:0] field holds log2(outdiv) */
+	outdiv = MDSS_PLL_REG_R(pll->pll_base, PLL_PLL_OUTDIV_RATE);
+	outdiv &= 0x3;
+	outdiv = 1 << outdiv;
+
+	/*
+	 * TODO:
+	 *	1. Assumes the prescaler is enabled (hence ref_clk * 2)
+	 *	2. Multiplier is 2^18. It should be 2^(num_of_frac_bits)
+	 */
+	multiplier = 1 << 18;
+	pll_freq = dec * (ref_clk * 2);
+	tmp64 = (ref_clk * 2 * frac);
+	pll_freq += div_u64(tmp64, multiplier);
+
+	vco_rate = div_u64(pll_freq, outdiv);
+
+	pr_debug("dec=0x%x, frac=0x%x, outdiv=%d, vco=%llu\n",
+		 dec, frac, outdiv, vco_rate);
+
+end:
+	(void)mdss_pll_resource_enable(pll, false);
+	return (unsigned long)vco_rate;
+}
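+
+/*
+ * The register readback in vco_7nm_recalc_rate() is the inverse of
+ * dsi_pll_calc_dec_frac(). For the hypothetical values dec = 0x27 (39),
+ * frac = 0x4000 (16384) and outdiv = 1:
+ *
+ *   pll_freq = 39 * 38.4 MHz + 38.4 MHz * 16384 / 2^18
+ *            = 1497.6 MHz + 2.4 MHz = 1500 MHz
+ *   vco_rate = 1500 MHz / 1 = 1500 MHz
+ */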
+
+static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = context;
+	u32 reg_val;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	*div = (reg_val & 0xF0) >> 4;
+
+	/*
+	 * In the common clock framework the divider value is interpreted
+	 * as one less; hence we return one less for all dividers except
+	 * when the value is zero.
+	 */
+	if (*div != 0)
+		*div -= 1;
+
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	reg_val &= ~0xF0;
+	reg_val |= (div << 4);
+	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = context;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+	/*
+	 * In the common clock framework the divider value provided is one
+	 * less; hence adjust the divider value by one prior to writing it
+	 * to hardware.
+	 */
+	div++;
+	pixel_clk_set_div_sub(pll, div);
+	if (pll->slave)
+		pixel_clk_set_div_sub(pll->slave, div);
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return 0;
+}
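+
+/*
+ * Illustration of the off-by-one convention shared by the get/set pair
+ * above (hypothetical value): a common clock framework divider of 3 is
+ * written to PHY_CMN_CLK_CFG0 bits [7:4] as 4, and a hardware value of
+ * 4 reads back as 3, so values round-trip consistently through regmap.
+ */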
+
+static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
+{
+	int rc;
+	struct mdss_pll_resources *pll = context;
+	u32 reg_val;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
+	*div = (reg_val & 0x0F);
+
+	/*
+	 * In the common clock framework the divider value is interpreted
+	 * as one less; hence we return one less for all dividers except
+	 * when the value is zero.
+	 */
+	if (*div != 0)
+		*div -= 1;
+	(void)mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
+{
+	u32 reg_val;
+
+	reg_val = MDSS_PLL_REG_R(rsc->phy_base, PHY_CMN_CLK_CFG0);
+	reg_val &= ~0x0F;
+	reg_val |= div;
+	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
+}
+
+static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
+{
+	int rc;
+	struct mdss_pll_resources *rsc = context;
+	struct dsi_pll_7nm *pll;
+
+	if (!rsc) {
+		pr_err("pll resource not found\n");
+		return -EINVAL;
+	}
+
+	pll = rsc->priv;
+	if (!pll) {
+		pr_err("pll configuration not found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(rsc, true);
+	if (rc) {
+		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
+		return rc;
+	}
+
+	/*
+	 * In the common clock framework the divider value provided is one
+	 * less; hence adjust the divider value by one prior to writing it
+	 * to hardware.
+	 */
+	div++;
+
+	bit_clk_set_div_sub(rsc, div);
+	/* For slave PLL, this divider always should be set to 1 */
+	if (rsc->slave)
+		bit_clk_set_div_sub(rsc->slave, 1);
+
+	(void)mdss_pll_resource_enable(rsc, false);
+
+	return rc;
+}
+
+static struct regmap_config dsi_pll_7nm_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7c0,
+};
+
+static struct regmap_bus pll_regmap_bus = {
+	.reg_write = pll_reg_write,
+	.reg_read = pll_reg_read,
+};
+
+static struct regmap_bus pclk_src_mux_regmap_bus = {
+	.reg_read = pclk_mux_read_sel,
+	.reg_write = pclk_mux_write_sel,
+};
+
+static struct regmap_bus pclk_src_regmap_bus = {
+	.reg_write = pixel_clk_set_div,
+	.reg_read = pixel_clk_get_div,
+};
+
+static struct regmap_bus bitclk_src_regmap_bus = {
+	.reg_write = bit_clk_set_div,
+	.reg_read = bit_clk_get_div,
+};
+
+static const struct clk_ops clk_ops_vco_7nm = {
+	.recalc_rate = vco_7nm_recalc_rate,
+	.set_rate = vco_7nm_set_rate,
+	.round_rate = vco_7nm_round_rate,
+	.prepare = vco_7nm_prepare,
+	.unprepare = vco_7nm_unprepare,
+};
+
+static struct regmap_bus mdss_mux_regmap_bus = {
+	.reg_write = mdss_set_mux_sel,
+	.reg_read = mdss_get_mux_sel,
+};
+
+/*
+ * Clock tree for generating DSI byte and pclk.
+ *
+ *
+ *                  +---------------+
+ *                  |    vco_clk    |
+ *                  +-------+-------+
+ *                          |
+ *                          |
+ *                  +---------------+
+ *                  |  pll_out_div  |
+ *                  |  DIV(1,2,4,8) |
+ *                  +-------+-------+
+ *                          |
+ *                          +-----------------------------+--------+
+ *                          |                             |        |
+ *                  +-------v-------+                     |        |
+ *                  |  bitclk_src   |                     |        |
+ *                  |  DIV(1..15)   |                     |        |
+ *                  +-------+-------+                     |        |
+ *                          |                             |        |
+ *                          +----------+---------+        |        |
+ *   Shadow Path            |          |         |        |        |
+ *       +          +-------v-------+  |  +------v------+ | +------v-------+
+ *       |          |  byteclk_src  |  |  |post_bit_div | | |post_vco_div  |
+ *       |          |  DIV(8)       |  |  |DIV (2)      | | |DIV(4)        |
+ *       |          +-------+-------+  |  +------+------+ | +------+-------+
+ *       |                  |          |         |      | |        |
+ *       |                  |          |         +------+ |        |
+ *       |                  |          +-------------+  | |   +----+
+ *       |         +--------+                        |  | |   |
+ *       |         |                               +-v--v-v---v------+
+ *     +-v---------v----+                           \  pclk_src_mux /
+ *     \  byteclk_mux /                              \             /
+ *      \            /                                +-----+-----+
+ *       +----+-----+                                       |        Shadow Path
+ *            |                                             |             +
+ *            v                                       +-----v------+      |
+ *       dsi_byte_clk                                 |  pclk_src  |      |
+ *                                                    | DIV(1..15) |      |
+ *                                                    +-----+------+      |
+ *                                                          |             |
+ *                                                          |             |
+ *                                                          +--------+    |
+ *                                                                   |    |
+ *                                                               +---v----v----+
+ *                                                                \  pclk_mux /
+ *                                                                 \         /
+ *                                                                  +---+---+
+ *                                                                      |
+ *                                                                      |
+ *                                                                      v
+ *                                                                   dsi_pclk
+ *
+ */
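+
+/*
+ * Example rates through the tree above (hypothetical panel timing):
+ * with the VCO at 3.0 GHz, pll_out_div = 2 and bitclk_src = DIV(1),
+ * the bit clock is 1.5 GHz and byteclk_src = 1.5 GHz / 8 = 187.5 MHz.
+ * With pclk_src_mux selecting post_bit_div (DIV(2)) and pclk_src =
+ * DIV(1), dsi_pclk = 1.5 GHz / 2 = 750 MHz.
+ */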
+
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_7nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1000000000UL,
+	.max_rate = 3500000000UL,
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_vco_clk",
+			.parent_names = (const char *[]){"bi_tcxo"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_7nm,
+			.flags = CLK_GET_RATE_NOCACHE,
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pll_out_div",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pll_out_div = {
+	.reg = PLL_PLL_OUTDIV_RATE,
+	.shift = 0,
+	.width = 2,
+	.flags = CLK_DIVIDER_POWER_OF_TWO,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pll_out_div",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_bitclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_post_vco_div",
+		.parent_names = (const char *[]){"dsi0pll_pll_out_div"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_post_vco_div = {
+	.div = 4,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_post_vco_div",
+		.parent_names = (const char *[]){"dsi1pll_pll_out_div"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi0pll_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_post_bit_div",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_post_bit_div = {
+	.div = 2,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_post_bit_div",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = CLK_GET_RATE_NOCACHE,
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_byteclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_byteclk",
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_byteclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1_phy_pll_out_byteclk",
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi0pll_bitclk_src",
+					"dsi0pll_post_bit_div",
+					"dsi0pll_pll_out_div",
+					"dsi0pll_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+	.reg = PHY_CMN_CLK_CFG1,
+	.shift = 0,
+	.width = 2,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi1pll_bitclk_src",
+					"dsi1pll_post_bit_div",
+					"dsi1pll_pll_out_div",
+					"dsi1pll_post_vco_div"},
+			.num_parents = 4,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pclk_src = {
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0_phy_pll_out_dsiclk",
+			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_mux = {
+	.shift = 0,
+	.width = 1,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1_phy_pll_out_dsiclk",
+			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_hw *mdss_dsi_pllcc_7nm[] = {
+	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+	[PLL_OUT_DIV_0_CLK] = &dsi0pll_pll_out_div.clkr.hw,
+	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
+	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.hw,
+	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.hw,
+	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
+	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+	[PLL_OUT_DIV_1_CLK] = &dsi1pll_pll_out_div.clkr.hw,
+	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
+	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.hw,
+	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.hw,
+	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
+	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+};
+
+int dsi_pll_clock_register_7nm(struct platform_device *pdev,
+				  struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx, i;
+	struct clk *clk;
+	struct clk_onecell_data *clk_data;
+	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_7nm);
+	struct regmap *rmap;
+
+	ndx = pll_res->index;
+
+	if (ndx >= DSI_PLL_MAX) {
+		pr_err("pll index(%d) NOT supported\n", ndx);
+		return -EINVAL;
+	}
+
+	pll_rsc_db[ndx] = pll_res;
+	plls[ndx].rsc = pll_res;
+	pll_res->priv = &plls[ndx];
+	pll_res->vco_delay = VCO_DELAY_USEC;
+
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(*clk_data), GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
+
+	clk_data->clks = devm_kcalloc(&pdev->dev, num_clks,
+				sizeof(struct clk *), GFP_KERNEL);
+	if (!clk_data->clks)
+		return -ENOMEM;
+
+	clk_data->clk_num = num_clks;
+
+	/* Establish client data */
+	if (ndx == 0) {
+		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi0pll_pll_out_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi0pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi0pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi0pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi0pll_byteclk_mux.clkr.regmap = rmap;
+
+		dsi0pll_vco_clk.priv = pll_res;
+		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_7nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+							pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+
+	} else {
+		rmap = devm_regmap_init(&pdev->dev, &pll_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi1pll_pll_out_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi1pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi1pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi1pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_mux_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_7nm_config);
+		dsi1pll_byteclk_mux.clkr.regmap = rmap;
+		dsi1pll_vco_clk.priv = pll_res;
+
+		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_7nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+						pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d, clocks successfully\n",
+				ndx);
+
+		return rc;
+	}
+clk_register_fail:
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c
new file mode 100644
index 0000000..02d7f09
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996-util.c
@@ -0,0 +1,1130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+#define DSI_PLL_POLL_MAX_READS                  15
+#define DSI_PLL_POLL_TIMEOUT_US                 1000
+#define MSM8996_DSI_PLL_REVISION_2		2
+
+#define CEIL(x, y)		(((x) + ((y)-1)) / (y))
+
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk)
+{
+	return 0;
+}
+
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel)
+{
+	return 0;
+}
+
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk)
+{
+	return 0;
+}
+
+static int mdss_pll_read_stored_trim_codes(
+		struct mdss_pll_resources *dsi_pll_res, s64 vco_clk_rate)
+{
+	int i;
+	int rc = 0;
+	bool found = false;
+
+	if (!dsi_pll_res->dfps) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	for (i = 0; i < dsi_pll_res->dfps->panel_dfps.frame_rate_cnt; i++) {
+		struct dfps_codes_info *codes_info =
+			&dsi_pll_res->dfps->codes_dfps[i];
+
+		pr_debug("valid=%d frame_rate=%d, vco_rate=%d, code %d %d\n",
+			codes_info->is_valid, codes_info->frame_rate,
+			codes_info->clk_rate, codes_info->pll_codes.pll_codes_1,
+			codes_info->pll_codes.pll_codes_2);
+
+		if (vco_clk_rate != codes_info->clk_rate &&
+				codes_info->is_valid)
+			continue;
+
+		dsi_pll_res->cache_pll_trim_codes[0] =
+			codes_info->pll_codes.pll_codes_1;
+		dsi_pll_res->cache_pll_trim_codes[1] =
+			codes_info->pll_codes.pll_codes_2;
+		found = true;
+		break;
+	}
+
+	if (!found) {
+		rc = -EINVAL;
+		goto end_read;
+	}
+
+	pr_debug("core_kvco_code=0x%x core_vco_tune=0x%x\n",
+			dsi_pll_res->cache_pll_trim_codes[0],
+			dsi_pll_res->cache_pll_trim_codes[1]);
+
+end_read:
+	return rc;
+}
+
+int post_n1_div_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	int rc;
+	u32 n1div = 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/*
+	 * vco rate = bit_clk * postdiv * n1div
+	 * vco range is 1300 to 2600 MHz
+	 * postdiv = 1
+	 * n1div = 1 to 15
+	 * n1div = roundup(1300 MHz / bit_clk)
+	 * supports bit_clk above 86.67 MHz
+	 */
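+	/*
+	 * Illustrative example (not from the hardware spec): for a
+	 * 150 MHz bit clock, n1div = roundup(1300 / 150) = 9, giving
+	 * vco = 150 MHz * 9 = 1350 MHz, inside the 1300-2600 MHz range.
+	 */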
+
+	/* this is for vco/bit clock */
+	pout->pll_postdiv = 1;	/* fixed, divided by 1 */
+	pout->pll_n1div  = div;
+
+	n1div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n1div &= ~0xf;
+	n1div |= (div & 0xf);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n1div);
+	/* ensure n1 divider is programmed */
+	wmb();
+	pr_debug("ndx=%d div=%d postdiv=%x n1div=%x\n",
+			pll->index, div, pout->pll_postdiv, pout->pll_n1div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return 0;
+}
+
+int post_n1_div_get_div(struct div_clk *clk)
+{
+	u32  div;
+	int rc;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/*
+	 * postdiv = 1/2/4/8
+	 * n1div = 1 - 15
+	 * for the time being, assume postdiv = 1
+	 */
+
+	div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	div &= 0xF;
+	pr_debug("n1 div = %d\n", div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return div;
+}
+
+int n2_div_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	struct mdss_pll_resources *slave;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	pout = &pdb->out;
+
+	/* this is for pixel clock */
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div &= ~0xf0;	/* bits 4 to 7 */
+	n2div |= (div << 4);
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		MDSS_PLL_REG_W(slave->pll_base, DSIPHY_CMN_CLK_CFG0, n2div);
+
+	pout->pll_n2div = div;
+
+	/* set dsiclk_sel=1 so that n2div *= 2 */
+	MDSS_PLL_REG_W(pll->pll_base, DSIPHY_CMN_CLK_CFG1, 1);
+	pr_debug("ndx=%d div=%d n2div=%x\n", pll->index, div, n2div);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+int shadow_n2_div_set_div(struct div_clk *clk, int div)
+{
+	struct mdss_pll_resources *pll = clk->priv;
+	struct dsi_pll_db *pdb;
+	struct dsi_pll_output *pout;
+	u32 data;
+
+	pdb = pll->priv;
+	pout = &pdb->out;
+
+	pout->pll_n2div = div;
+
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+			DSI_DYNAMIC_REFRESH_PLL_CTRL19,
+			DSIPHY_CMN_CLK_CFG0, DSIPHY_CMN_CLK_CFG1,
+			data, 1);
+	return 0;
+}
+
+int n2_div_get_div(struct div_clk *clk)
+{
+	int rc;
+	u32 n2div;
+	struct mdss_pll_resources *pll = clk->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d resources\n",
+						pll->index);
+		return rc;
+	}
+
+	n2div = MDSS_PLL_REG_R(pll->pll_base, DSIPHY_CMN_CLK_CFG0);
+	n2div >>= 4;
+	n2div &= 0x0f;
+
+	mdss_pll_resource_enable(pll, false);
+
+	pr_debug("ndx=%d div=%d\n", pll->index, n2div);
+
+	return n2div;
+}
+
+static bool pll_is_pll_locked_8996(struct mdss_pll_resources *pll)
+{
+	u32 status;
+	bool pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((pll->pll_base +
+			DSIPHY_PLL_RESET_SM_READY_STATUS),
+			status,
+			((status & BIT(5)) > 0),
+			DSI_PLL_POLL_MAX_READS,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x failed to Lock\n",
+				pll->index, status);
+		pll_locked = false;
+	} else if (readl_poll_timeout_atomic((pll->pll_base +
+				DSIPHY_PLL_RESET_SM_READY_STATUS),
+				status,
+				((status & BIT(0)) > 0),
+				DSI_PLL_POLL_MAX_READS,
+				DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_err("DSI PLL ndx=%d status=%x PLl not ready\n",
+				pll->index, status);
+		pll_locked = false;
+	} else {
+		pll_locked = true;
+	}
+
+	return pll_locked;
+}
+
+static void dsi_pll_start_8996(void __iomem *pll_base)
+{
+	pr_debug("start PLL at base=%p\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VREF_CFG1, 0x10);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 1);
+}
+
+static void dsi_pll_stop_8996(void __iomem *pll_base)
+{
+	pr_debug("stop PLL at base=%p\n", pll_base);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+}
+
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll)
+{
+	int rc = 0;
+
+	if (!pll) {
+		pr_err("Invalid PLL resources\n");
+		return -EINVAL;
+	}
+
+	dsi_pll_start_8996(pll->pll_base);
+
+	/*
+	 * both DSIPHY_PLL_CLKBUFLR_EN and DSIPHY_CMN_GLBL_TEST_CTRL
+	 * enabled at mdss_dsi_8996_phy_config()
+	 */
+
+	if (!pll_is_pll_locked_8996(pll)) {
+		pr_err("DSI PLL ndx=%d lock failed\n", pll->index);
+		rc = -EINVAL;
+		goto init_lock_err;
+	}
+
+	pr_debug("DSI PLL ndx=%d Lock success\n", pll->index);
+
+init_lock_err:
+	return rc;
+}
+
+static int dsi_pll_enable(struct clk *c)
+{
+	int i, rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](pll);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+
+	if (rc)
+		pr_err("ndx=%d DSI PLL failed to lock\n", pll->index);
+	else
+		pll->pll_on = true;
+
+	return rc;
+}
+
+static void dsi_pll_disable(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct mdss_pll_resources *slave;
+
+	if (!pll->pll_on &&
+		mdss_pll_resource_enable(pll, true)) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return;
+	}
+
+	pll->handoff_resources = false;
+	slave = pll->slave;
+
+	dsi_pll_stop_8996(pll->pll_base);
+
+	mdss_pll_resource_enable(pll, false);
+
+	pll->pll_on = false;
+
+	pr_debug("DSI PLL ndx=%d Disabled\n", pll->index);
+}
+
+static void mdss_dsi_pll_8996_input_init(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	pdb->in.fref = 19200000;	/* 19.2 MHz */
+	pdb->in.fdata = 0;		/* bit clock rate */
+	pdb->in.dsiclk_sel = 1;		/* 1, reg: 0x0014 */
+	pdb->in.ssc_en = pll->ssc_en;		/* 1, reg: 0x0494, bit 0 */
+	pdb->in.ldo_en = 0;		/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed  input */
+	pdb->in.refclk_dbler_en = 0;	/* 0, reg: 0x04c0, bit 1 */
+	pdb->in.vco_measure_time = 5;	/* 5, unknown */
+	pdb->in.kvco_measure_time = 5;	/* 5, unknown */
+	pdb->in.bandgap_timer = 4;	/* 4, reg: 0x0430, bit 3 - 5 */
+	pdb->in.pll_wakeup_timer = 5;	/* 5, reg: 0x043c, bit 0 - 2 */
+	pdb->in.plllock_cnt = 1;	/* 1, reg: 0x0488, bit 1 - 2 */
+	pdb->in.plllock_rng = 0;	/* 0, reg: 0x0488, bit 3 - 4 */
+	pdb->in.ssc_center = pll->ssc_center;/* 0, reg: 0x0494, bit 1 */
+	pdb->in.ssc_adj_period = 37;	/* 37, reg: 0x498, bit 0 - 9 */
+	pdb->in.ssc_spread = pll->ssc_ppm / 1000;
+	pdb->in.ssc_freq = pll->ssc_freq;
+
+	pdb->in.pll_ie_trim = 4;	/* 4, reg: 0x0400 */
+	pdb->in.pll_ip_trim = 4;	/* 4, reg: 0x0404 */
+	pdb->in.pll_cpcset_cur = 1;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	pdb->in.pll_cpmset_cur = 1;	/* 1, reg: 0x04f0, bit 3 - 5 */
+	pdb->in.pll_icpmset = 4;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	pdb->in.pll_icpcset = 4;	/* 4, reg: 0x04fc, bit 0 - 2 */
+	pdb->in.pll_icpmset_p = 0;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	pdb->in.pll_icpmset_m = 0;	/* 0, reg: 0x04f4, bit 3 - 5 */
+	pdb->in.pll_icpcset_p = 0;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	pdb->in.pll_icpcset_m = 0;	/* 0, reg: 0x04f8, bit 3 - 5 */
+	pdb->in.pll_lpf_res1 = 3;	/* 3, reg: 0x0504, bit 0 - 3 */
+	pdb->in.pll_lpf_cap1 = 11;	/* 11, reg: 0x0500, bit 0 - 3 */
+	pdb->in.pll_lpf_cap2 = 1;	/* 1, reg: 0x0500, bit 4 - 7 */
+	pdb->in.pll_iptat_trim = 7;
+	pdb->in.pll_c3ctrl = 2;		/* 2 */
+	pdb->in.pll_r3ctrl = 1;		/* 1 */
+}
+
+static void pll_8996_ssc_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	u32 period, ssc_period;
+	u32 ref, rem;
+	s64 step_size;
+
+	pr_debug("%s: vco=%lld ref=%lld\n", __func__,
+		pll->vco_current_rate, pll->vco_ref_clk_rate);
+
+	ssc_period = pdb->in.ssc_freq / 500;
+	period = (unsigned long)pll->vco_ref_clk_rate / 1000;
+	ssc_period  = CEIL(period, ssc_period);
+	ssc_period -= 1;
+	pdb->out.ssc_period = ssc_period;
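+	/*
+	 * Worked example (with the default ssc_freq of 31500 Hz and a
+	 * 19.2 MHz reference): 31500 / 500 = 63, 19200000 / 1000 =
+	 * 19200, CEIL(19200, 63) - 1 = 304.
+	 */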
+
+	pr_debug("%s: ssc, freq=%d spread=%d period=%d\n", __func__,
+	pdb->in.ssc_freq, pdb->in.ssc_spread, pdb->out.ssc_period);
+
+	step_size = (u32)pll->vco_current_rate;
+	ref = pll->vco_ref_clk_rate;
+	ref /= 1000;
+	step_size = div_s64(step_size, ref);
+	step_size <<= 20;
+	step_size = div_s64(step_size, 1000);
+	step_size *= pdb->in.ssc_spread;
+	step_size = div_s64(step_size, 1000);
+	step_size *= (pdb->in.ssc_adj_period + 1);
+
+	rem = 0;
+	step_size = div_s64_rem(step_size, ssc_period + 1, &rem);
+	if (rem)
+		step_size++;
+
+	pr_debug("%s: step_size=%lld\n", __func__, step_size);
+
+	step_size &= 0x0ffff;	/* take lower 16 bits */
+
+	pdb->out.ssc_step_size = step_size;
+}
+
+static void pll_8996_dec_frac_calc(struct mdss_pll_resources *pll,
+				struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	s64 multiplier = BIT(20);
+	s64 dec_start_multiple, dec_start, pll_comp_val;
+	s32 duration, div_frac_start;
+	s64 vco_clk_rate = pll->vco_current_rate;
+	s64 fref = pll->vco_ref_clk_rate;
+
+	pr_debug("vco_clk_rate=%lld ref_clk_rate=%lld\n",
+				vco_clk_rate, fref);
+
+	dec_start_multiple = div_s64(vco_clk_rate * multiplier, fref);
+	div_s64_rem(dec_start_multiple, multiplier, &div_frac_start);
+
+	dec_start = div_s64(dec_start_multiple, multiplier);
+
+	pout->dec_start = (u32)dec_start;
+	pout->div_frac_start = div_frac_start;
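+	/*
+	 * Illustrative numbers: vco = 1350 MHz with fref = 19.2 MHz
+	 * gives a feedback ratio of 70.3125, so dec_start = 70 and
+	 * div_frac_start = 0.3125 * 2^20 = 327680.
+	 */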
+
+	if (pin->plllock_cnt == 0)
+		duration = 1024;
+	else if (pin->plllock_cnt == 1)
+		duration = 256;
+	else if (pin->plllock_cnt == 2)
+		duration = 128;
+	else
+		duration = 32;
+
+	pll_comp_val =  duration * dec_start_multiple;
+	pll_comp_val =  div_s64(pll_comp_val, multiplier);
+	do_div(pll_comp_val, 10);
+
+	pout->plllock_cmp = (u32)pll_comp_val;
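+	/*
+	 * Continuing the illustration above: plllock_cnt = 1 selects a
+	 * duration of 256 cycles, so plllock_cmp = 256 * 70.3125 / 10
+	 * = 1800.
+	 */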
+
+	pout->pll_txclk_en = 1;
+	if (pll->revision == MSM8996_DSI_PLL_REVISION_2)
+		pout->cmn_ldo_cntrl = 0x3c;
+	else
+		pout->cmn_ldo_cntrl = 0x1c;
+}
+
+static u32 pll_8996_kvco_slop(u32 vrate)
+{
+	u32 slop = 0;
+
+	if (vrate > 1300000000UL && vrate <= 1800000000UL)
+		slop =  600;
+	else if (vrate > 1800000000UL && vrate < 2300000000UL)
+		slop = 400;
+	else if (vrate > 2300000000UL && vrate < 2600000000UL)
+		slop = 280;
+
+	return slop;
+}
+
+static void pll_8996_calc_vco_count(struct dsi_pll_db *pdb,
+			 s64 vco_clk_rate, s64 fref)
+{
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	s64 data;
+	u32 cnt;
+
+	data = fref * pin->vco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 2;
+	pout->pll_vco_div_ref = data;
+
+	data = (unsigned long)vco_clk_rate / 1000000;	/* unit is MHz */
+	data *= pin->vco_measure_time;
+	do_div(data, 10);
+	pout->pll_vco_count = data; /* reg: 0x0474, 0x0478 */
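+	/*
+	 * Illustrative numbers: fref = 19.2 MHz and a 5 us measure
+	 * window give pll_vco_div_ref = 19.2 * 5 - 2 = 94; a 1350 MHz
+	 * vco gives pll_vco_count = 1350 * 5 / 10 = 675.
+	 */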
+
+	data = fref * pin->kvco_measure_time;
+	do_div(data, 1000000);
+	data &= 0x03ff;	/* 10 bits */
+	data -= 1;
+	pout->pll_kvco_div_ref = data;
+
+	cnt = pll_8996_kvco_slop(vco_clk_rate);
+	cnt *= 2;
+	do_div(cnt, 100);
+	cnt *= pin->kvco_measure_time;
+	pout->pll_kvco_count = cnt;
+
+	pout->pll_misc1 = 16;
+	pout->pll_resetsm_cntrl = 48;
+	pout->pll_resetsm_cntrl2 = pin->bandgap_timer << 3;
+	pout->pll_resetsm_cntrl5 = pin->pll_wakeup_timer;
+	pout->pll_kvco_code = 0;
+}
+
+static void pll_db_commit_ssc(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pin->ssc_adj_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER1, data);
+	data = (pin->ssc_adj_period >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_ADJ_PER2, data);
+
+	data = pout->ssc_period;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER1, data);
+	data = (pout->ssc_period >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_PER2, data);
+
+	data = pout->ssc_step_size;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE1, data);
+	data = (pout->ssc_step_size >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_STEP_SIZE2, data);
+
+	data = (pin->ssc_center & 0x01);
+	data <<= 1;
+	data |= 0x01; /* enable */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SSC_EN_CENTER, data);
+
+	wmb();	/* make sure register committed */
+}
+
+static void pll_db_commit_common(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	/* configure the non-frequency-dependent pll registers */
+	data = 0;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_SYSCLK_EN_RESET, data);
+
+	/* DSIPHY_PLL_CLKBUFLR_EN updated at dsi phy */
+
+	data = pout->pll_txclk_en;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_TXCLK_EN, data);
+
+	data = pout->pll_resetsm_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL, data);
+	data = pout->pll_resetsm_cntrl2;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL2, data);
+	data = pout->pll_resetsm_cntrl5;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_RESETSM_CNTRL5, data);
+
+	data = pout->pll_vco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF1, data);
+	data = (pout->pll_vco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_DIV_REF2, data);
+
+	data = pout->pll_kvco_div_ref;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF1, data);
+	data = (pout->pll_kvco_div_ref >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_DIV_REF2, data);
+
+	data = pout->pll_misc1;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_MISC1, data);
+
+	data = pin->pll_ie_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IE_TRIM, data);
+
+	data = pin->pll_ip_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IP_TRIM, data);
+
+	data = ((pin->pll_cpmset_cur << 3) | pin->pll_cpcset_cur);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_CP_SET_CUR, data);
+
+	data = ((pin->pll_icpcset_p << 3) | pin->pll_icpcset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPCSET, data);
+
+	data = ((pin->pll_icpmset_p << 3) | pin->pll_icpmset_m);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICPMSET, data);
+
+	data = ((pin->pll_icpmset << 3) | pin->pll_icpcset);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_ICP_SET, data);
+
+	data = ((pdb->in.pll_lpf_cap2 << 4) | pdb->in.pll_lpf_cap1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF1, data);
+
+	data = pin->pll_iptat_trim;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_IPTAT_TRIM, data);
+
+	data = (pdb->in.pll_c3ctrl | (pdb->in.pll_r3ctrl << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_CRCTRL, data);
+}
+
+static void pll_db_commit_8996(struct mdss_pll_resources *pll,
+					struct dsi_pll_db *pdb)
+{
+	void __iomem *pll_base = pll->pll_base;
+	struct dsi_pll_input *pin = &pdb->in;
+	struct dsi_pll_output *pout = &pdb->out;
+	char data;
+
+	data = pout->cmn_ldo_cntrl;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_LDO_CNTRL, data);
+
+	pll_db_commit_common(pll, pdb);
+
+	/* de-assert pll start and apply pll sw reset */
+	/* stop pll */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_PLL_CNTRL, 0);
+
+	/* pll sw reset */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0x20);
+	wmb();	/* make sure register committed */
+	udelay(10);
+
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_1, 0);
+	wmb();	/* make sure register committed */
+
+	data = pdb->in.dsiclk_sel; /* set dsiclk_sel = 1  */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG1, data);
+
+	data = 0xff; /* data, clk, pll normal operation */
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CTRL_0, data);
+
+	/* configure the frequency-dependent pll registers */
+	data = pout->dec_start;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DEC_START, data);
+
+	data = pout->div_frac_start;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START1, data);
+	data = (pout->div_frac_start >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START2, data);
+	data = (pout->div_frac_start >> 16);
+	data &= 0x0f;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_DIV_FRAC_START3, data);
+
+	data = pout->plllock_cmp;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP1, data);
+	data = (pout->plllock_cmp >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP2, data);
+	data = (pout->plllock_cmp >> 16);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP3, data);
+
+	data = ((pin->plllock_cnt << 1) | (pin->plllock_rng << 3));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLLLOCK_CMP_EN, data);
+
+	data = pout->pll_vco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT1, data);
+	data = (pout->pll_vco_count >> 8);
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_VCO_COUNT2, data);
+
+	data = pout->pll_kvco_count;
+	data &= 0x0ff;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT1, data);
+	data = (pout->pll_kvco_count >> 8);
+	data &= 0x03;
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_KVCO_COUNT2, data);
+
+	/*
+	 * tx_band = pll_postdiv
+	 * 0: divided by 1 <== for now
+	 * 1: divided by 2
+	 * 2: divided by 4
+	 * 3: divided by 8
+	 */
+	data = (((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1);
+	MDSS_PLL_REG_W(pll_base, DSIPHY_PLL_PLL_LPF2_POSTDIV, data);
+
+	data = (pout->pll_n1div | (pout->pll_n2div << 4));
+	MDSS_PLL_REG_W(pll_base, DSIPHY_CMN_CLK_CFG0, data);
+
+	if (pll->ssc_en)
+		pll_db_commit_ssc(pll, pdb);
+
+	wmb();	/* make sure register committed */
+}
+
+/*
+ * pll_source_finding:
+ * Both GLBL_TEST_CTRL and CLKBUFLR_EN are configured
+ * at mdss_dsi_8996_phy_config()
+ */
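+/*
+ * Decision table, as encoded by the checks below:
+ *   source from LEFT,  outputs BOTH  -> PLL_MASTER
+ *   source from RIGHT, outputs NONE  -> PLL_SLAVE
+ *   source from LEFT,  outputs RIGHT -> PLL_STANDALONE
+ *   any other combination            -> PLL_UNKNOWN
+ */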
+static int pll_source_finding(struct mdss_pll_resources *pll)
+{
+	u32 clk_buf_en;
+	u32 glbl_test_ctrl;
+
+	glbl_test_ctrl = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_CMN_GLBL_TEST_CTRL);
+	clk_buf_en = MDSS_PLL_REG_R(pll->pll_base,
+				DSIPHY_PLL_CLKBUFLR_EN);
+
+	glbl_test_ctrl &= BIT(2);
+	glbl_test_ctrl >>= 2;
+
+	pr_debug("%s: pll=%d clk_buf_en=%x glbl_test_ctrl=%x\n",
+		__func__, pll->index, clk_buf_en, glbl_test_ctrl);
+
+	clk_buf_en &= (PLL_OUTPUT_RIGHT | PLL_OUTPUT_LEFT);
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_BOTH))
+		return PLL_MASTER;
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_RIGHT) &&
+			(clk_buf_en == PLL_OUTPUT_NONE))
+		return PLL_SLAVE;
+
+	if ((glbl_test_ctrl == PLL_SOURCE_FROM_LEFT) &&
+			(clk_buf_en == PLL_OUTPUT_RIGHT))
+		return PLL_STANDALONE;
+
+	pr_debug("%s: Error pll setup, clk_buf_en=%x glbl_test_ctrl=%x\n",
+			__func__, clk_buf_en, glbl_test_ctrl);
+
+	return PLL_UNKNOWN;
+}
+
+static void pll_source_setup(struct mdss_pll_resources *pll)
+{
+	int status;
+	struct dsi_pll_db *pdb = (struct dsi_pll_db *)pll->priv;
+	struct mdss_pll_resources *other;
+
+	if (pdb->source_setup_done)
+		return;
+
+	pdb->source_setup_done++;
+
+	status = pll_source_finding(pll);
+
+	if (status == PLL_STANDALONE || status == PLL_UNKNOWN)
+		return;
+
+	other = pdb->next->pll;
+	if (!other)
+		return;
+
+	pr_debug("%s: status=%d pll=%d other=%d\n", __func__,
+			status, pll->index, other->index);
+
+	if (status == PLL_MASTER)
+		pll->slave = other;
+	else
+		other->slave = pll;
+}
+
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct mdss_pll_resources *slave;
+	struct dsi_pll_db *pdb;
+
+	pdb = (struct dsi_pll_db *)pll->priv;
+	if (!pdb) {
+		pr_err("No prov found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	pll_source_setup(pll);
+
+	pr_debug("%s: ndx=%d base=%p rate=%lu slave=%p\n", __func__,
+				pll->index, pll->pll_base, rate, pll->slave);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_8996_input_init(pll, pdb);
+
+	pll_8996_dec_frac_calc(pll, pdb);
+
+	if (pll->ssc_en)
+		pll_8996_ssc_calc(pll, pdb);
+
+	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+					pll->vco_ref_clk_rate);
+
+	/* commit slave if split display is enabled */
+	slave = pll->slave;
+	if (slave)
+		pll_db_commit_8996(slave, pdb);
+
+	/* commit master itself */
+	pll_db_commit_8996(pll, pdb);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return rc;
+}
+
+static void shadow_pll_dynamic_refresh_8996(struct mdss_pll_resources *pll,
+							struct dsi_pll_db *pdb)
+{
+	struct dsi_pll_output *pout = &pdb->out;
+
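+	/*
+	 * Each DSI_DYNAMIC_REFRESH_PLL_CTRLnn slot queues a pair of
+	 * PHY register writes; hardware replays the queued list when a
+	 * dynamic refresh (fps change) is triggered, per the barrier
+	 * comment at the end of this function.
+	 */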
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL20,
+		DSIPHY_CMN_CTRL_0, DSIPHY_PLL_SYSCLK_EN_RESET,
+		0xFF, 0x0);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL21,
+		DSIPHY_PLL_DEC_START, DSIPHY_PLL_DIV_FRAC_START1,
+		pout->dec_start, (pout->div_frac_start & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL22,
+		DSIPHY_PLL_DIV_FRAC_START2, DSIPHY_PLL_DIV_FRAC_START3,
+		((pout->div_frac_start >> 8) & 0x0FF),
+		((pout->div_frac_start >> 16) & 0x0F));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL23,
+		DSIPHY_PLL_PLLLOCK_CMP1, DSIPHY_PLL_PLLLOCK_CMP2,
+		(pout->plllock_cmp & 0x0FF),
+		((pout->plllock_cmp >> 8) & 0x0FF));
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL24,
+		DSIPHY_PLL_PLLLOCK_CMP3, DSIPHY_PLL_PLL_VCO_TUNE,
+		((pout->plllock_cmp >> 16) & 0x03),
+		(pll->cache_pll_trim_codes[1] | BIT(7))); /* VCO tune*/
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL25,
+		DSIPHY_PLL_KVCO_CODE, DSIPHY_PLL_RESETSM_CNTRL,
+		(pll->cache_pll_trim_codes[0] | BIT(5)), 0x38);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL26,
+		DSIPHY_PLL_PLL_LPF2_POSTDIV, DSIPHY_CMN_PLL_CNTRL,
+		(((pout->pll_postdiv - 1) << 4) | pdb->in.pll_lpf_res1), 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL27,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL28,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_DYN_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_CTRL29,
+		DSIPHY_CMN_PLL_CNTRL, DSIPHY_CMN_PLL_CNTRL,
+		0x01, 0x01);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR, 0x0000001E);
+	MDSS_PLL_REG_W(pll->dyn_pll_base,
+		DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2, 0x001FFE00);
+
+	/*
+	 * Ensure all the dynamic refresh registers are written before
+	 * dynamic refresh to change the fps is triggered
+	 */
+	wmb();
+}
+
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate)
+{
+	int rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+	struct dsi_pll_db *pdb;
+	s64 vco_clk_rate = (s64)rate;
+
+	if (!pll) {
+		pr_err("PLL data not found\n");
+		return -EINVAL;
+	}
+
+	pdb = pll->priv;
+	if (!pdb) {
+		pr_err("No priv data found\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_read_stored_trim_codes(pll, vco_clk_rate);
+	if (rc) {
+		pr_err("cannot find pll codes rate=%lld\n", vco_clk_rate);
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	pr_debug("%s: ndx=%d base=%p rate=%lu\n", __func__,
+			pll->index, pll->pll_base, rate);
+
+	pll->vco_current_rate = rate;
+	pll->vco_ref_clk_rate = vco->ref_clk_rate;
+
+	mdss_dsi_pll_8996_input_init(pll, pdb);
+
+	pll_8996_dec_frac_calc(pll, pdb);
+
+	pll_8996_calc_vco_count(pdb, pll->vco_current_rate,
+			pll->vco_ref_clk_rate);
+
+	shadow_pll_dynamic_refresh_8996(pll, pdb);
+
+	rc = mdss_pll_resource_enable(pll, false);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi plla=%d\n", pll->index);
+		return rc;
+	}
+
+	return rc;
+}
+
+unsigned long pll_vco_get_rate_8996(struct clk *c)
+{
+	u64 vco_rate, multiplier = BIT(20);
+	s32 div_frac_start;
+	u32 dec_start;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	u64 ref_clk = vco->ref_clk_rate;
+	int rc;
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (is_gdsc_disabled(pll))
+		return 0;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return rc;
+	}
+
+	dec_start = MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DEC_START);
+	dec_start &= 0x0ff;
+	pr_debug("dec_start = 0x%x\n", dec_start);
+
+	div_frac_start = (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START3) & 0x0f) << 16;
+	div_frac_start |= (MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START2) & 0x0ff) << 8;
+	div_frac_start |= MDSS_PLL_REG_R(pll->pll_base,
+			DSIPHY_PLL_DIV_FRAC_START1) & 0x0ff;
+	pr_debug("div_frac_start = 0x%x\n", div_frac_start);
+
+	vco_rate = ref_clk * dec_start;
+	vco_rate += ((ref_clk * div_frac_start) / multiplier);
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(pll, false);
+
+	return (unsigned long)vco_rate;
+}
+
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	u32 div;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+	div = vco->min_rate / rate;
+	if (div > 15) {
+		/* rate < 86.67 MHz */
+		pr_err("rate=%lu is not supported\n", rate);
+		return -EINVAL;
+	}
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
+
+enum handoff pll_vco_handoff_8996(struct clk *c)
+{
+	int rc;
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (is_gdsc_disabled(pll))
+		return HANDOFF_DISABLED_CLK;
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll=%d\n", pll->index);
+		return ret;
+	}
+
+	if (pll_is_pll_locked_8996(pll)) {
+		pll->handoff_resources = true;
+		pll->pll_on = true;
+		c->rate = pll_vco_get_rate_8996(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		mdss_pll_resource_enable(pll, false);
+	}
+
+	return ret;
+}
+
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c)
+{
+	return HANDOFF_DISABLED_CLK;
+}
+
+int pll_vco_prepare_8996(struct clk *c)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(pll, true);
+	if (rc) {
+		pr_err("ndx=%d Failed to enable mdss dsi pll resources\n",
+							pll->index);
+		return rc;
+	}
+
+	if ((pll->vco_cached_rate != 0)
+	    && (pll->vco_cached_rate == c->rate)) {
+		rc = c->ops->set_rate(c, pll->vco_cached_rate);
+		if (rc) {
+			pr_err("index=%d vco_set_rate failed. rc=%d\n",
+					rc, pll->index);
+			mdss_pll_resource_enable(pll, false);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(c);
+
+	if (rc) {
+		mdss_pll_resource_enable(pll, false);
+		pr_err("ndx=%d failed to enable dsi pll\n", pll->index);
+	}
+
+error:
+	return rc;
+}
+
+void pll_vco_unprepare_8996(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *pll = vco->priv;
+
+	if (!pll) {
+		pr_err("Dsi pll resources are not available\n");
+		return;
+	}
+
+	pll->vco_cached_rate = c->rate;
+	dsi_pll_disable(c);
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c
new file mode 100644
index 0000000..0a81e02
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.c
@@ -0,0 +1,548 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/workqueue.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dsi-pll-8996.h"
+
+#define VCO_DELAY_USEC		1
+
+static struct dsi_pll_db pll_db[DSI_PLL_NUM];
+
+static struct clk_ops n2_clk_src_ops;
+static struct clk_ops shadow_n2_clk_src_ops;
+static struct clk_ops byte_clk_src_ops;
+static struct clk_ops post_n1_div_clk_src_ops;
+static struct clk_ops shadow_post_n1_div_clk_src_ops;
+
+static struct clk_ops clk_ops_gen_mux_dsi;
+
+/* Op structures */
+static const struct clk_ops clk_ops_dsi_vco = {
+	.set_rate = pll_vco_set_rate_8996,
+	.round_rate = pll_vco_round_rate_8996,
+	.handoff = pll_vco_handoff_8996,
+	.prepare = pll_vco_prepare_8996,
+	.unprepare = pll_vco_unprepare_8996,
+};
+
+static struct clk_div_ops post_n1_div_ops = {
+	.set_div = post_n1_div_set_div,
+	.get_div = post_n1_div_get_div,
+};
+
+static struct clk_div_ops n2_div_ops = {	/* hr_oclk3 */
+	.set_div = n2_div_set_div,
+	.get_div = n2_div_get_div,
+};
+
+static struct clk_mux_ops mdss_byte_mux_ops = {
+	.set_mux_sel = set_mdss_byte_mux_sel_8996,
+	.get_mux_sel = get_mdss_byte_mux_sel_8996,
+};
+
+static struct clk_mux_ops mdss_pixel_mux_ops = {
+	.set_mux_sel = set_mdss_pixel_mux_sel_8996,
+	.get_mux_sel = get_mdss_pixel_mux_sel_8996,
+};
+
+/* Shadow ops for dynamic refresh */
+static const struct clk_ops clk_ops_shadow_dsi_vco = {
+	.set_rate = shadow_pll_vco_set_rate_8996,
+	.round_rate = pll_vco_round_rate_8996,
+	.handoff = shadow_pll_vco_handoff_8996,
+};
+
+static struct clk_div_ops shadow_post_n1_div_ops = {
+	.set_div = post_n1_div_set_div,
+};
+
+static struct clk_div_ops shadow_n2_div_ops = {
+	.set_div = shadow_n2_div_set_div,
+};
+
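+/*
+ * Clock tree, as wired up below (per DSI PLL):
+ *   vco_clk -> post_n1_div_clk -> byte_clk_src (fixed /8) -> byte_clk_mux
+ *                             `-> n2_div_clk -> pixel_clk_src (fixed /2)
+ *                                                        -> pixel_clk_mux
+ * The shadow_* clocks mirror this chain for dynamic refresh.
+ */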
+static struct dsi_pll_vco_clk dsi0pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi0pll_vco_clk_8996",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi0pll_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi0pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.c = {
+		.dbg_name = "dsi0pll_shadow_vco_clk",
+		.ops = &clk_ops_shadow_dsi_vco,
+		CLK_INIT(dsi0pll_shadow_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
+	.ref_clk_rate = 19200000UL,
+	.min_rate = 1300000000UL,
+	.max_rate = 2600000000UL,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi1pll_vco_clk_8996",
+		.ops = &clk_ops_dsi_vco,
+		CLK_INIT(dsi1pll_vco_clk.c),
+	},
+};
+
+static struct dsi_pll_vco_clk dsi1pll_shadow_vco_clk = {
+	.ref_clk_rate = 19200000u,
+	.min_rate = 1300000000u,
+	.max_rate = 2600000000u,
+	.pll_en_seq_cnt = 1,
+	.pll_enable_seqs[0] = dsi_pll_enable_seq_8996,
+	.c = {
+		.dbg_name = "dsi1pll_shadow_vco_clk",
+		.ops = &clk_ops_shadow_dsi_vco,
+		CLK_INIT(dsi1pll_shadow_vco_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &post_n1_div_ops,
+	.c = {
+		.parent = &dsi0pll_vco_clk.c,
+		.dbg_name = "dsi0pll_post_n1_div_clk",
+		.ops = &post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_post_n1_div_ops,
+	.c = {
+		.parent = &dsi0pll_shadow_vco_clk.c,
+		.dbg_name = "dsi0pll_shadow_post_n1_div_clk",
+		.ops = &shadow_post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_shadow_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &post_n1_div_ops,
+	.c = {
+		.parent = &dsi1pll_vco_clk.c,
+		.dbg_name = "dsi1pll_post_n1_div_clk",
+		.ops = &post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_post_n1_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_post_n1_div_ops,
+	.c = {
+		.parent = &dsi1pll_shadow_vco_clk.c,
+		.dbg_name = "dsi1pll_shadow_post_n1_div_clk",
+		.ops = &shadow_post_n1_div_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_shadow_post_n1_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &n2_div_ops,
+	.c = {
+		.parent = &dsi0pll_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_n2_div_clk",
+		.ops = &n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_n2_div_ops,
+	.c = {
+		.parent = &dsi0pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_shadow_n2_div_clk",
+		.ops = &shadow_n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_shadow_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &n2_div_ops,
+	.c = {
+		.parent = &dsi1pll_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_n2_div_clk",
+		.ops = &n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_n2_div_clk = {
+	.data = {
+		.max_div = 15,
+		.min_div = 1,
+	},
+	.ops = &shadow_n2_div_ops,
+	.c = {
+		.parent = &dsi1pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_shadow_n2_div_clk",
+		.ops = &shadow_n2_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_shadow_n2_div_clk.c),
+	},
+};
+
+static struct div_clk dsi0pll_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi0pll_n2_div_clk.c,
+		.dbg_name = "dsi0pll_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pixel_clk_src.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi0pll_shadow_n2_div_clk.c,
+		.dbg_name = "dsi0pll_shadow_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_shadow_pixel_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi1pll_n2_div_clk.c,
+		.dbg_name = "dsi1pll_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pixel_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_pixel_clk_src = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &dsi1pll_shadow_n2_div_clk.c,
+		.dbg_name = "dsi1pll_shadow_pixel_clk_src",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_shadow_pixel_clk_src.c),
+	},
+};
+
+static struct mux_clk dsi0pll_pixel_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_pixel_clk_src.c, 0},
+		{&dsi0pll_shadow_pixel_clk_src.c, 1},
+	},
+	.ops = &mdss_pixel_mux_ops,
+	.c = {
+		.parent = &dsi0pll_pixel_clk_src.c,
+		.dbg_name = "dsi0pll_pixel_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_pixel_clk_mux.c),
+	}
+};
+
+static struct mux_clk dsi1pll_pixel_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_pixel_clk_src.c, 0},
+		{&dsi1pll_shadow_pixel_clk_src.c, 1},
+	},
+	.ops = &mdss_pixel_mux_ops,
+	.c = {
+		.parent = &dsi1pll_pixel_clk_src.c,
+		.dbg_name = "dsi1pll_pixel_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_pixel_clk_mux.c),
+	}
+};
+
+static struct div_clk dsi0pll_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi0pll_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi0pll_byte_clk_src.c),
+	},
+};
+
+static struct div_clk dsi0pll_shadow_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi0pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi0pll_shadow_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi0pll_shadow_byte_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi1pll_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi1pll_byte_clk_src.c),
+	},
+};
+
+static struct div_clk dsi1pll_shadow_byte_clk_src = {
+	.data = {
+		.div = 8,
+		.min_div = 8,
+		.max_div = 8,
+	},
+	.c = {
+		.parent = &dsi1pll_shadow_post_n1_div_clk.c,
+		.dbg_name = "dsi1pll_shadow_byte_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(dsi1pll_shadow_byte_clk_src.c),
+	},
+};
+
+static struct mux_clk dsi0pll_byte_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi0pll_byte_clk_src.c, 0},
+		{&dsi0pll_shadow_byte_clk_src.c, 1},
+	},
+	.ops = &mdss_byte_mux_ops,
+	.c = {
+		.parent = &dsi0pll_byte_clk_src.c,
+		.dbg_name = "dsi0pll_byte_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi0pll_byte_clk_mux.c),
+	}
+};
+static struct mux_clk dsi1pll_byte_clk_mux = {
+	.num_parents = 2,
+	.parents = (struct clk_src[]) {
+		{&dsi1pll_byte_clk_src.c, 0},
+		{&dsi1pll_shadow_byte_clk_src.c, 1},
+	},
+	.ops = &mdss_byte_mux_ops,
+	.c = {
+		.parent = &dsi1pll_byte_clk_src.c,
+		.dbg_name = "dsi1pll_byte_clk_mux",
+		.ops = &clk_ops_gen_mux_dsi,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(dsi1pll_byte_clk_mux.c),
+	}
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8996[] = {
+	CLK_LIST(dsi0pll_byte_clk_mux),
+	CLK_LIST(dsi0pll_byte_clk_src),
+	CLK_LIST(dsi0pll_pixel_clk_mux),
+	CLK_LIST(dsi0pll_pixel_clk_src),
+	CLK_LIST(dsi0pll_n2_div_clk),
+	CLK_LIST(dsi0pll_post_n1_div_clk),
+	CLK_LIST(dsi0pll_vco_clk),
+	CLK_LIST(dsi0pll_shadow_byte_clk_src),
+	CLK_LIST(dsi0pll_shadow_pixel_clk_src),
+	CLK_LIST(dsi0pll_shadow_n2_div_clk),
+	CLK_LIST(dsi0pll_shadow_post_n1_div_clk),
+	CLK_LIST(dsi0pll_shadow_vco_clk),
+};
+
+static struct clk_lookup mdss_dsi_pllcc_8996_1[] = {
+	CLK_LIST(dsi1pll_byte_clk_mux),
+	CLK_LIST(dsi1pll_byte_clk_src),
+	CLK_LIST(dsi1pll_pixel_clk_mux),
+	CLK_LIST(dsi1pll_pixel_clk_src),
+	CLK_LIST(dsi1pll_n2_div_clk),
+	CLK_LIST(dsi1pll_post_n1_div_clk),
+	CLK_LIST(dsi1pll_vco_clk),
+	CLK_LIST(dsi1pll_shadow_byte_clk_src),
+	CLK_LIST(dsi1pll_shadow_pixel_clk_src),
+	CLK_LIST(dsi1pll_shadow_n2_div_clk),
+	CLK_LIST(dsi1pll_shadow_post_n1_div_clk),
+	CLK_LIST(dsi1pll_shadow_vco_clk),
+};
+
+int dsi_pll_clock_register_8996(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0, ndx;
+	int const ssc_freq_default = 31500; /* default h/w recommended value */
+	int const ssc_ppm_default = 5000; /* default h/w recommended value */
+	struct dsi_pll_db *pdb;
+
+	if (pll_res->index >= DSI_PLL_NUM) {
+		pr_err("pll ndx=%d is NOT supported\n", pll_res->index);
+		return -EINVAL;
+	}
+
+	ndx = pll_res->index;
+	pdb = &pll_db[ndx];
+	pll_res->priv = pdb;
+	pdb->pll = pll_res;
+	ndx++;
+	ndx %= DSI_PLL_NUM;
+	pdb->next = &pll_db[ndx];
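+	/*
+	 * The two pll_db entries are linked into a ring so that
+	 * pll_source_setup() can reach the peer PLL when resolving the
+	 * master/slave roles for split display.
+	 */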
+
+	/* Set clock source operations */
+
+	/* hr_oclk3, pixel */
+	n2_clk_src_ops = clk_ops_slave_div;
+	n2_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	shadow_n2_clk_src_ops = clk_ops_slave_div;
+
+	/* hr_ockl2, byte, vco pll */
+	post_n1_div_clk_src_ops = clk_ops_div;
+	post_n1_div_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	shadow_post_n1_div_clk_src_ops = clk_ops_div;
+
+	byte_clk_src_ops = clk_ops_div;
+	byte_clk_src_ops.prepare = mdss_pll_div_prepare;
+
+	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
+	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
+	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
+
+	if (pll_res->ssc_en) {
+		if (!pll_res->ssc_freq)
+			pll_res->ssc_freq = ssc_freq_default;
+		if (!pll_res->ssc_ppm)
+			pll_res->ssc_ppm = ssc_ppm_default;
+	}
+
+	/* Set client data for mux, div and vco clocks */
+	if (pll_res->index == DSI_PLL_1) {
+		dsi1pll_byte_clk_src.priv = pll_res;
+		dsi1pll_pixel_clk_src.priv = pll_res;
+		dsi1pll_post_n1_div_clk.priv = pll_res;
+		dsi1pll_n2_div_clk.priv = pll_res;
+		dsi1pll_vco_clk.priv = pll_res;
+
+		dsi1pll_shadow_byte_clk_src.priv = pll_res;
+		dsi1pll_shadow_pixel_clk_src.priv = pll_res;
+		dsi1pll_shadow_post_n1_div_clk.priv = pll_res;
+		dsi1pll_shadow_n2_div_clk.priv = pll_res;
+		dsi1pll_shadow_vco_clk.priv = pll_res;
+
+		pll_res->vco_delay = VCO_DELAY_USEC;
+		rc = of_msm_clock_register(pdev->dev.of_node,
+				mdss_dsi_pllcc_8996_1,
+				ARRAY_SIZE(mdss_dsi_pllcc_8996_1));
+	} else {
+		dsi0pll_byte_clk_src.priv = pll_res;
+		dsi0pll_pixel_clk_src.priv = pll_res;
+		dsi0pll_post_n1_div_clk.priv = pll_res;
+		dsi0pll_n2_div_clk.priv = pll_res;
+		dsi0pll_vco_clk.priv = pll_res;
+
+		dsi0pll_shadow_byte_clk_src.priv = pll_res;
+		dsi0pll_shadow_pixel_clk_src.priv = pll_res;
+		dsi0pll_shadow_post_n1_div_clk.priv = pll_res;
+		dsi0pll_shadow_n2_div_clk.priv = pll_res;
+		dsi0pll_shadow_vco_clk.priv = pll_res;
+
+		pll_res->vco_delay = VCO_DELAY_USEC;
+		rc = of_msm_clock_register(pdev->dev.of_node,
+				mdss_dsi_pllcc_8996,
+				ARRAY_SIZE(mdss_dsi_pllcc_8996));
+	}
+
+	if (!rc) {
+		pr_info("Registered DSI PLL ndx=%d clocks successfully\n",
+						pll_res->index);
+	}
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h
new file mode 100644
index 0000000..4f66560
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-8996.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef MDSS_DSI_PLL_8996_H
+#define MDSS_DSI_PLL_8996_H
+
+#define DSIPHY_CMN_CLK_CFG0		0x0010
+#define DSIPHY_CMN_CLK_CFG1		0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL	0x0018
+
+#define DSIPHY_CMN_PLL_CNTRL		0x0048
+#define DSIPHY_CMN_CTRL_0		0x001c
+#define DSIPHY_CMN_CTRL_1		0x0020
+
+#define DSIPHY_CMN_LDO_CNTRL		0x004c
+
+#define DSIPHY_PLL_IE_TRIM		0x0400
+#define DSIPHY_PLL_IP_TRIM		0x0404
+
+#define DSIPHY_PLL_IPTAT_TRIM		0x0410
+
+#define DSIPHY_PLL_CLKBUFLR_EN		0x041c
+
+#define DSIPHY_PLL_SYSCLK_EN_RESET	0x0428
+#define DSIPHY_PLL_RESETSM_CNTRL	0x042c
+#define DSIPHY_PLL_RESETSM_CNTRL2	0x0430
+#define DSIPHY_PLL_RESETSM_CNTRL3	0x0434
+#define DSIPHY_PLL_RESETSM_CNTRL4	0x0438
+#define DSIPHY_PLL_RESETSM_CNTRL5	0x043c
+#define DSIPHY_PLL_KVCO_DIV_REF1	0x0440
+#define DSIPHY_PLL_KVCO_DIV_REF2	0x0444
+#define DSIPHY_PLL_KVCO_COUNT1		0x0448
+#define DSIPHY_PLL_KVCO_COUNT2		0x044c
+#define DSIPHY_PLL_VREF_CFG1		0x045c
+
+#define DSIPHY_PLL_KVCO_CODE		0x0458
+
+#define DSIPHY_PLL_VCO_DIV_REF1		0x046c
+#define DSIPHY_PLL_VCO_DIV_REF2		0x0470
+#define DSIPHY_PLL_VCO_COUNT1		0x0474
+#define DSIPHY_PLL_VCO_COUNT2		0x0478
+#define DSIPHY_PLL_PLLLOCK_CMP1		0x047c
+#define DSIPHY_PLL_PLLLOCK_CMP2		0x0480
+#define DSIPHY_PLL_PLLLOCK_CMP3		0x0484
+#define DSIPHY_PLL_PLLLOCK_CMP_EN	0x0488
+#define DSIPHY_PLL_PLL_VCO_TUNE		0x048C
+#define DSIPHY_PLL_DEC_START		0x0490
+#define DSIPHY_PLL_SSC_EN_CENTER	0x0494
+#define DSIPHY_PLL_SSC_ADJ_PER1		0x0498
+#define DSIPHY_PLL_SSC_ADJ_PER2		0x049c
+#define DSIPHY_PLL_SSC_PER1		0x04a0
+#define DSIPHY_PLL_SSC_PER2		0x04a4
+#define DSIPHY_PLL_SSC_STEP_SIZE1	0x04a8
+#define DSIPHY_PLL_SSC_STEP_SIZE2	0x04ac
+#define DSIPHY_PLL_DIV_FRAC_START1	0x04b4
+#define DSIPHY_PLL_DIV_FRAC_START2	0x04b8
+#define DSIPHY_PLL_DIV_FRAC_START3	0x04bc
+#define DSIPHY_PLL_TXCLK_EN		0x04c0
+#define DSIPHY_PLL_PLL_CRCTRL		0x04c4
+
+#define DSIPHY_PLL_RESET_SM_READY_STATUS 0x04cc
+
+#define DSIPHY_PLL_PLL_MISC1		0x04e8
+
+#define DSIPHY_PLL_CP_SET_CUR		0x04f0
+#define DSIPHY_PLL_PLL_ICPMSET		0x04f4
+#define DSIPHY_PLL_PLL_ICPCSET		0x04f8
+#define DSIPHY_PLL_PLL_ICP_SET		0x04fc
+#define DSIPHY_PLL_PLL_LPF1		0x0500
+#define DSIPHY_PLL_PLL_LPF2_POSTDIV	0x0504
+#define DSIPHY_PLL_PLL_BANDGAP	0x0508
+
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15		0x050
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19		0x060
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20		0x064
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21		0x068
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22		0x06C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23		0x070
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24		0x074
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25		0x078
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26		0x07C
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27		0x080
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28		0x084
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29		0x088
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR	0x094
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2	0x098
+
+struct dsi_pll_input {
+	u32 fref;	/* 19.2 Mhz, reference clk */
+	u32 fdata;	/* bit clock rate */
+	u32 dsiclk_sel; /* 1, reg: 0x0014 */
+	u32 n2div;	/* 1, reg: 0x0010, bit 4-7 */
+	u32 ssc_en;	/* 1, reg: 0x0494, bit 0 */
+	u32 ldo_en;	/* 0,  reg: 0x004c, bit 0 */
+
+	/* fixed  */
+	u32 refclk_dbler_en;	/* 0, reg: 0x04c0, bit 1 */
+	u32 vco_measure_time;	/* 5, unknown */
+	u32 kvco_measure_time;	/* 5, unknown */
+	u32 bandgap_timer;	/* 4, reg: 0x0430, bit 3 - 5 */
+	u32 pll_wakeup_timer;	/* 5, reg: 0x043c, bit 0 - 2 */
+	u32 plllock_cnt;	/* 1, reg: 0x0488, bit 1 - 2 */
+	u32 plllock_rng;	/* 1, reg: 0x0488, bit 3 - 4 */
+	u32 ssc_center;		/* 0, reg: 0x0494, bit 1 */
+	u32 ssc_adj_period;	/* 37, reg: 0x498, bit 0 - 9 */
+	u32 ssc_spread;		/* 0.005  */
+	u32 ssc_freq;		/* unknown */
+	u32 pll_ie_trim;	/* 4, reg: 0x0400 */
+	u32 pll_ip_trim;	/* 4, reg: 0x0404 */
+	u32 pll_iptat_trim;	/* reg: 0x0410 */
+	u32 pll_cpcset_cur;	/* 1, reg: 0x04f0, bit 0 - 2 */
+	u32 pll_cpmset_cur;	/* 1, reg: 0x04f0, bit 3 - 5 */
+
+	u32 pll_icpmset;	/* 4, reg: 0x04fc, bit 3 - 5 */
+	u32 pll_icpcset;	/* 4, reg: 0x04fc, bit 0 - 2 */
+
+	u32 pll_icpmset_p;	/* 0, reg: 0x04f4, bit 0 - 2 */
+	u32 pll_icpmset_m;	/* 0, reg: 0x04f4, bit 3 - 5 */
+
+	u32 pll_icpcset_p;	/* 0, reg: 0x04f8, bit 0 - 2 */
+	u32 pll_icpcset_m;	/* 0, reg: 0x04f8, bit 3 - 5 */
+
+	u32 pll_lpf_res1;	/* 3, reg: 0x0504, bit 0 - 3 */
+	u32 pll_lpf_cap1;	/* 11, reg: 0x0500, bit 0 - 3 */
+	u32 pll_lpf_cap2;	/* 1, reg: 0x0500, bit 4 - 7 */
+	u32 pll_c3ctrl;		/* 2, reg: 0x04c4 */
+	u32 pll_r3ctrl;		/* 1, reg: 0x04c4 */
+};
+
+struct dsi_pll_output {
+	u32 pll_txclk_en;	/* reg: 0x04c0 */
+	u32 dec_start;		/* reg: 0x0490 */
+	u32 div_frac_start;	/* reg: 0x04b4, 0x4b8, 0x04bc */
+	u32 ssc_period;		/* reg: 0x04a0, 0x04a4 */
+	u32 ssc_step_size;	/* reg: 0x04a8, 0x04ac */
+	u32 plllock_cmp;	/* reg: 0x047c, 0x0480, 0x0484 */
+	u32 pll_vco_div_ref;	/* reg: 0x046c, 0x0470 */
+	u32 pll_vco_count;	/* reg: 0x0474, 0x0478 */
+	u32 pll_kvco_div_ref;	/* reg: 0x0440, 0x0444 */
+	u32 pll_kvco_count;	/* reg: 0x0448, 0x044c */
+	u32 pll_misc1;		/* reg: 0x04e8 */
+	u32 pll_lpf2_postdiv;	/* reg: 0x0504 */
+	u32 pll_resetsm_cntrl;	/* reg: 0x042c */
+	u32 pll_resetsm_cntrl2;	/* reg: 0x0430 */
+	u32 pll_resetsm_cntrl5;	/* reg: 0x043c */
+	u32 pll_kvco_code;		/* reg: 0x0458 */
+
+	u32 cmn_clk_cfg0;	/* reg: 0x0010 */
+	u32 cmn_clk_cfg1;	/* reg: 0x0014 */
+	u32 cmn_ldo_cntrl;	/* reg: 0x004c */
+
+	u32 pll_postdiv;	/* vco */
+	u32 pll_n1div;		/* vco */
+	u32 pll_n2div;		/* hr_oclk3, pixel */
+	u32 fcvo;
+};
+
+enum {
+	DSI_PLL_0,
+	DSI_PLL_1,
+	DSI_PLL_NUM
+};
+
+struct dsi_pll_db {
+	struct dsi_pll_db *next;
+	struct mdss_pll_resources *pll;
+	struct dsi_pll_input in;
+	struct dsi_pll_output out;
+	int source_setup_done;
+};
+
+enum {
+	PLL_OUTPUT_NONE,
+	PLL_OUTPUT_RIGHT,
+	PLL_OUTPUT_LEFT,
+	PLL_OUTPUT_BOTH
+};
+
+enum {
+	PLL_SOURCE_FROM_LEFT,
+	PLL_SOURCE_FROM_RIGHT
+};
+
+enum {
+	PLL_UNKNOWN,
+	PLL_STANDALONE,
+	PLL_SLAVE,
+	PLL_MASTER
+};
+
+int pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+long pll_vco_round_rate_8996(struct clk *c, unsigned long rate);
+enum handoff pll_vco_handoff_8996(struct clk *c);
+enum handoff shadow_pll_vco_handoff_8996(struct clk *c);
+int shadow_post_n1_div_set_div(struct div_clk *clk, int div);
+int shadow_post_n1_div_get_div(struct div_clk *clk);
+int shadow_n2_div_set_div(struct div_clk *clk, int div);
+int shadow_n2_div_get_div(struct div_clk *clk);
+int shadow_pll_vco_set_rate_8996(struct clk *c, unsigned long rate);
+int pll_vco_prepare_8996(struct clk *c);
+void pll_vco_unprepare_8996(struct clk *c);
+int set_mdss_byte_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_byte_mux_sel_8996(struct mux_clk *clk);
+int set_mdss_pixel_mux_sel_8996(struct mux_clk *clk, int sel);
+int get_mdss_pixel_mux_sel_8996(struct mux_clk *clk);
+int post_n1_div_set_div(struct div_clk *clk, int div);
+int post_n1_div_get_div(struct div_clk *clk);
+int n2_div_set_div(struct div_clk *clk, int div);
+int n2_div_get_div(struct div_clk *clk);
+int dsi_pll_enable_seq_8996(struct mdss_pll_resources *pll);
+
+#endif  /* MDSS_DSI_PLL_8996_H */
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c
new file mode 100644
index 0000000..38afe9d
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-util.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+
+#define DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG	(0x0)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG	(0x0004)
+#define DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG	(0x0008)
+#define DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG	(0x000C)
+#define DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG		(0x0010)
+#define DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG	(0x0014)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG	(0x0024)
+#define DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG	(0x0028)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG		(0x002C)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG	(0x0030)
+#define DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG	(0x0034)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0		(0x0038)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1		(0x003C)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2		(0x0040)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3		(0x0044)
+#define DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4		(0x0048)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0		(0x006C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG2		(0x0074)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3		(0x0078)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4		(0x007C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG5		(0x0080)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6		(0x0084)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7		(0x0088)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8		(0x008C)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9		(0x0090)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10	(0x0094)
+#define DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11	(0x0098)
+#define DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG	(0x009C)
+#define DSI_PHY_PLL_UNIPHY_PLL_STATUS		(0x00C0)
+
+#define DSI_PLL_POLL_DELAY_US			50
+#define DSI_PLL_POLL_TIMEOUT_US			500
+
+int set_byte_mux_sel(struct mux_clk *clk, int sel)
+{
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	pr_debug("byte mux set to %s mode\n", sel ? "indirect" : "direct");
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG, (sel << 1));
+
+	return 0;
+}
+
+int get_byte_mux_sel(struct mux_clk *clk)
+{
+	int mux_mode, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	mux_mode = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_VREG_CFG) & BIT(1);
+
+	pr_debug("byte mux mode = %s\n", mux_mode ? "indirect" : "direct");
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return !!mux_mode;
+}
+
+int dsi_pll_div_prepare(struct clk *c)
+{
+	struct div_clk *div = to_div_clk(c);
+	/* Restore the divider's value */
+	return div->ops->set_div(div, div->data.div);
+}
+
+int dsi_pll_mux_prepare(struct clk *c)
+{
+	struct mux_clk *mux = to_mux_clk(c);
+	int i, rc, sel = 0;
+	struct mdss_pll_resources *dsi_pll_res = mux->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	for (i = 0; i < mux->num_parents; i++) {
+		if (mux->parents[i].src == c->parent) {
+			sel = mux->parents[i].sel;
+			break;
+		}
+	}
+
+	if (i == mux->num_parents) {
+		pr_err("Failed to select the parent clock\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* Restore the mux source select value */
+	rc = mux->ops->set_mux_sel(mux, sel);
+
+error:
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+int fixed_4div_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, (div - 1));
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+int fixed_4div_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return div + 1;
+}
+
+int digital_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, (div - 1));
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+int digital_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return div + 1;
+}
+
+int analog_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, div - 1);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	return rc;
+}
+
+int analog_get_div(struct div_clk *clk)
+{
+	int div = 0, rc;
+	struct mdss_pll_resources *dsi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+		DSI_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG) + 1;
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return div;
+}
+
+int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res)
+{
+	u32 status;
+	int pll_locked;
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((dsi_pll_res->pll_base +
+			DSI_PHY_PLL_UNIPHY_PLL_STATUS),
+			status,
+			((status & BIT(0)) == 1),
+			DSI_PLL_POLL_DELAY_US,
+			DSI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("DSI PLL status=%x failed to Lock\n", status);
+		pll_locked = 0;
+	} else {
+		pll_locked = 1;
+	}
+
+	return pll_locked;
+}
+
+int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate)
+{
+	s64 vco_clk_rate = rate;
+	s32 rem;
+	s64 refclk_cfg, frac_n_mode, ref_doubler_en_b;
+	s64 ref_clk_to_pll, div_fbx1000, frac_n_value;
+	s64 sdm_cfg0, sdm_cfg1, sdm_cfg2, sdm_cfg3;
+	s64 gen_vco_clk, cal_cfg10, cal_cfg11;
+	u32 res;
+	int i;
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	/* Configure the Loop filter resistance */
+	for (i = 0; i < vco->lpfr_lut_size; i++)
+		if (vco_clk_rate <= vco->lpfr_lut[i].vco_rate)
+			break;
+	if (i == vco->lpfr_lut_size) {
+		pr_err("unable to get loop filter resistance. vco=%ld\n", rate);
+		return -EINVAL;
+	}
+	res = vco->lpfr_lut[i].r;
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_LPFR_CFG, res);
+
+	/* Loop filter capacitance values : c1 and c2 */
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_LPFC1_CFG, 0x70);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_LPFC2_CFG, 0x15);
+
+	div_s64_rem(vco_clk_rate, vco->ref_clk_rate, &rem);
+	if (rem) {
+		refclk_cfg = 0x1;
+		frac_n_mode = 1;
+		ref_doubler_en_b = 0;
+	} else {
+		refclk_cfg = 0x0;
+		frac_n_mode = 0;
+		ref_doubler_en_b = 1;
+	}
+
+	pr_debug("refclk_cfg = %lld\n", refclk_cfg);
+
+	ref_clk_to_pll = ((vco->ref_clk_rate * 2 * (refclk_cfg))
+			  + (ref_doubler_en_b * vco->ref_clk_rate));
+	div_fbx1000 = div_s64((vco_clk_rate * 1000), ref_clk_to_pll);
+
+	div_s64_rem(div_fbx1000, 1000, &rem);
+	frac_n_value = div_s64((rem * (1 << 16)), 1000);
+	gen_vco_clk = div_s64(div_fbx1000 * ref_clk_to_pll, 1000);
+
+	pr_debug("ref_clk_to_pll = %lld\n", ref_clk_to_pll);
+	pr_debug("div_fb = %lld\n", div_fbx1000);
+	pr_debug("frac_n_value = %lld\n", frac_n_value);
+
+	pr_debug("Generated VCO Clock: %lld\n", gen_vco_clk);
+	rem = 0;
+	if (frac_n_mode) {
+		sdm_cfg0 = (0x0 << 5);
+		sdm_cfg0 |= (0x0 & 0x3f);
+		sdm_cfg1 = (div_s64(div_fbx1000, 1000) & 0x3f) - 1;
+		sdm_cfg3 = div_s64_rem(frac_n_value, 256, &rem);
+		sdm_cfg2 = rem;
+	} else {
+		sdm_cfg0 = (0x1 << 5);
+		sdm_cfg0 |= (div_s64(div_fbx1000, 1000) & 0x3f) - 1;
+		sdm_cfg1 = (0x0 & 0x3f);
+		sdm_cfg2 = 0;
+		sdm_cfg3 = 0;
+	}
+
+	pr_debug("sdm_cfg0=%lld\n", sdm_cfg0);
+	pr_debug("sdm_cfg1=%lld\n", sdm_cfg1);
+	pr_debug("sdm_cfg2=%lld\n", sdm_cfg2);
+	pr_debug("sdm_cfg3=%lld\n", sdm_cfg3);
+
+	cal_cfg11 = div_s64_rem(gen_vco_clk, 256 * 1000000, &rem);
+	cal_cfg10 = rem / 1000000;
+	pr_debug("cal_cfg10=%lld, cal_cfg11=%lld\n", cal_cfg10, cal_cfg11);
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CHGPUMP_CFG, 0x02);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG3, 0x2b);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG4, 0x66);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+		DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1, (u32)(sdm_cfg1 & 0xff));
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+		DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2, (u32)(sdm_cfg2 & 0xff));
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+		DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3, (u32)(sdm_cfg3 & 0xff));
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
+
+	/* Add hardware recommended delay for correct PLL configuration */
+	if (dsi_pll_res->vco_delay)
+		udelay(dsi_pll_res->vco_delay);
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, (u32)refclk_cfg);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_PWRGEN_CFG, 0x00);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x71);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0, (u32)sdm_cfg0);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x30);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x00);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x00);
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+		DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG10, (u32)(cal_cfg10 & 0xff));
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+		DSI_PHY_PLL_UNIPHY_PLL_CAL_CFG11, (u32)(cal_cfg11 & 0xff));
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_EFUSE_CFG, 0x20);
+
+	return 0;
+}
+
+unsigned long vco_get_rate(struct clk *c)
+{
+	u32 sdm0, doubler, sdm_byp_div;
+	u64 vco_rate;
+	u32 sdm_dc_off, sdm_freq_seed, sdm2, sdm3;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	u64 ref_clk = vco->ref_clk_rate;
+	int rc;
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* Check to see if the ref clk doubler is enabled */
+	doubler = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+				 DSI_PHY_PLL_UNIPHY_PLL_REFCLK_CFG) & BIT(0);
+	ref_clk += (doubler * vco->ref_clk_rate);
+
+	/* see if it is integer mode or sdm mode */
+	sdm0 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+					DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0);
+	if (sdm0 & BIT(6)) {
+		/* integer mode */
+		sdm_byp_div = (MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG0) & 0x3f) + 1;
+		vco_rate = ref_clk * sdm_byp_div;
+	} else {
+		/* sdm mode */
+		sdm_dc_off = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG1) & 0xFF;
+		pr_debug("sdm_dc_off = %d\n", sdm_dc_off);
+		sdm2 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG2) & 0xFF;
+		sdm3 = MDSS_PLL_REG_R(dsi_pll_res->pll_base,
+			DSI_PHY_PLL_UNIPHY_PLL_SDM_CFG3) & 0xFF;
+		sdm_freq_seed = (sdm3 << 8) | sdm2;
+		pr_debug("sdm_freq_seed = %d\n", sdm_freq_seed);
+
+		vco_rate = (ref_clk * (sdm_dc_off + 1)) +
+			mult_frac(ref_clk, sdm_freq_seed, BIT(16));
+		pr_debug("vco rate = %lld\n", vco_rate);
+	}
+
+	pr_debug("returning vco rate = %lu\n", (unsigned long)vco_rate);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+
+	return (unsigned long)vco_rate;
+}
+
+static int dsi_pll_enable(struct clk *c)
+{
+	int i, rc;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return rc;
+	}
+
+	/* Try all enable sequences until one succeeds */
+	for (i = 0; i < vco->pll_en_seq_cnt; i++) {
+		rc = vco->pll_enable_seqs[i](dsi_pll_res);
+		pr_debug("DSI PLL %s after sequence #%d\n",
+			rc ? "unlocked" : "locked", i + 1);
+		if (!rc)
+			break;
+	}
+
+	if (rc) {
+		mdss_pll_resource_enable(dsi_pll_res, false);
+		pr_err("DSI PLL failed to lock\n");
+		return rc;
+	}
+
+	dsi_pll_res->pll_on = true;
+
+	return 0;
+}
+
+static void dsi_pll_disable(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res->pll_on &&
+		mdss_pll_resource_enable(dsi_pll_res, true)) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return;
+	}
+
+	dsi_pll_res->handoff_resources = false;
+
+	MDSS_PLL_REG_W(dsi_pll_res->pll_base,
+				DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x00);
+
+	mdss_pll_resource_enable(dsi_pll_res, false);
+	dsi_pll_res->pll_on = false;
+
+	pr_debug("DSI PLL Disabled\n");
+}
+
+long vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
+
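+/*
+ * Handoff lets the clock framework adopt a PLL that is already running,
+ * e.g. one configured by the bootloader for the boot display: if the
+ * lock status reads back as locked, the clock is taken over at its
+ * current rate instead of being reprogrammed, and the enabled resources
+ * are kept (handoff_resources) until the framework releases them.
+ */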
+enum handoff vco_handoff(struct clk *c)
+{
+	int rc;
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(dsi_pll_res))
+		return HANDOFF_DISABLED_CLK;
+
+	rc = mdss_pll_resource_enable(dsi_pll_res, true);
+	if (rc) {
+		pr_err("Failed to enable mdss dsi pll resources\n");
+		return ret;
+	}
+
+	if (dsi_pll_lock_status(dsi_pll_res)) {
+		dsi_pll_res->handoff_resources = true;
+		dsi_pll_res->pll_on = true;
+		c->rate = vco_get_rate(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		mdss_pll_resource_enable(dsi_pll_res, false);
+	}
+
+	return ret;
+}
+
+int vco_prepare(struct clk *c)
+{
+	int rc = 0;
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res) {
+		pr_err("Dsi pll resources are not available\n");
+		return -EINVAL;
+	}
+
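+	/*
+	 * vco_unprepare() caches the last VCO rate; if it still matches
+	 * the requested rate, reprogram the PLL with it here so the
+	 * hardware is restored after a power collapse before enabling.
+	 */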
+	if ((dsi_pll_res->vco_cached_rate != 0)
+	    && (dsi_pll_res->vco_cached_rate == c->rate)) {
+		rc = c->ops->set_rate(c, dsi_pll_res->vco_cached_rate);
+		if (rc) {
+			pr_err("vco_set_rate failed. rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	rc = dsi_pll_enable(c);
+
+error:
+	return rc;
+}
+
+void vco_unprepare(struct clk *c)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct mdss_pll_resources *dsi_pll_res = vco->priv;
+
+	if (!dsi_pll_res) {
+		pr_err("Dsi pll resources are not available\n");
+		return;
+	}
+
+	dsi_pll_res->vco_cached_rate = c->rate;
+	dsi_pll_disable(c);
+}
+
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
new file mode 100644
index 0000000..8c55bba
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_DSI_PLL_H
+#define __MDSS_DSI_PLL_H
+
+#include <linux/clk-provider.h>
+#include "mdss-pll.h"
+#define MAX_DSI_PLL_EN_SEQS	10
+
+/* Register offsets for 20nm PHY PLL */
+#define MMSS_DSI_PHY_PLL_PLL_CNTRL		(0x0014)
+#define MMSS_DSI_PHY_PLL_PLL_BKG_KVCO_CAL_EN	(0x002C)
+#define MMSS_DSI_PHY_PLL_PLLLOCK_CMP_EN		(0x009C)
+
+struct lpfr_cfg {
+	unsigned long vco_rate;
+	u32 r;
+};
+
+struct dsi_pll_vco_clk {
+	struct clk_hw	hw;
+	unsigned long	ref_clk_rate;
+	unsigned long	min_rate;
+	unsigned long	max_rate;
+	u32		pll_en_seq_cnt;
+	struct lpfr_cfg *lpfr_lut;
+	u32		lpfr_lut_size;
+	void		*priv;
+
+	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
+			(struct mdss_pll_resources *dsi_pll_res);
+};
+
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int dsi_pll_clock_register_7nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+int dsi_pll_clock_register_28lpm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw)
+{
+	return container_of(hw, struct dsi_pll_vco_clk, hw);
+}
+
+int dsi_pll_clock_register_14nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+#endif
diff --git a/drivers/clk/qcom/mdss/mdss-edp-pll-28hpm.c b/drivers/clk/qcom/mdss/mdss-edp-pll-28hpm.c
new file mode 100644
index 0000000..d8f5c3c
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-edp-pll-28hpm.c
@@ -0,0 +1,580 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include <dt-bindings/clock/msm-clocks-8974.h>
+
+#include "mdss-pll.h"
+#include "mdss-edp-pll.h"
+
+#define EDP_PHY_PLL_UNIPHY_PLL_REFCLK_CFG	(0x0)
+#define EDP_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG	(0x0004)
+#define EDP_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG	(0x000C)
+#define EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG		(0x0020)
+#define EDP_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG	(0x0024)
+#define EDP_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG	(0x0028)
+#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG0		(0x0038)
+#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG1		(0x003C)
+#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG2		(0x0040)
+#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG3		(0x0044)
+#define EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG4		(0x0048)
+#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG0		(0x004C)
+#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG1		(0x0050)
+#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG2		(0x0054)
+#define EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG3		(0x0058)
+#define EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG2	(0x0064)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG0		(0x006C)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG2		(0x0074)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG6		(0x0084)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG7		(0x0088)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG8		(0x008C)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG9		(0x0090)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG10	(0x0094)
+#define EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG11	(0x0098)
+#define EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG0	(0x005C)
+#define EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG1	(0x0060)
+
+#define EDP_PLL_POLL_DELAY_US			50
+#define EDP_PLL_POLL_TIMEOUT_US			500
+
+static struct clk_ops edp_mainlink_clk_src_ops;
+static struct clk_div_ops fixed_5div_ops; /* null ops */
+static struct clk_ops edp_pixel_clk_ops;
+
+static inline struct edp_pll_vco_clk *to_edp_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct edp_pll_vco_clk, c);
+}
+
+int edp_div_prepare(struct clk *c)
+{
+	struct div_clk *div = to_div_clk(c);
+	/* Restore the divider's value */
+	return div->ops->set_div(div, div->data.div);
+}
+
+static int edp_vco_set_rate(struct clk *c, unsigned long vco_rate)
+{
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+	struct mdss_pll_resources *edp_pll_res = vco->priv;
+	int rc;
+
+	pr_debug("vco_rate=%d\n", (int)vco_rate);
+
+	rc = mdss_pll_resource_enable(edp_pll_res, true);
+	if (rc) {
+		pr_err("failed to enable edp pll res rc=%d\n", rc);
+		return -EINVAL;
+	}
+
+	if (vco_rate == 810000000) {
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_VCOLPF_CFG, 0x18);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG0, 0x36);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG1, 0x69);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG2, 0xff);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG3, 0x2f);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG0, 0x80);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG1, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG2, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG3, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x5a);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x0);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x0);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG10, 0x2a);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG11, 0x3);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG1, 0x1a);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, 0x00);
+	} else if (vco_rate == 1350000000) {
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG2, 0x0d);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG0, 0x36);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG1, 0x62);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG2, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG3, 0x28);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG0, 0x80);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG1, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG2, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_SSC_CFG3, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG0, 0x12);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG6, 0x5a);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG7, 0x0);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG9, 0x0);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG10, 0x46);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_CAL_CFG11, 0x5);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_LKDET_CFG1, 0x1a);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV1_CFG, 0x00);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV3_CFG, 0x00);
+	} else {
+		pr_err("rate=%d is NOT supported\n", (int)vco_rate);
+		vco_rate = 0;
+		rc = -EINVAL;
+	}
+
+	MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+	udelay(100);
+	MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+	udelay(100);
+	MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x07);
+	udelay(100);
+	MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+	udelay(100);
+	mdss_pll_resource_enable(edp_pll_res, false);
+
+	vco->rate = vco_rate;
+
+	return rc;
+}
+
+static int edp_pll_ready_poll(struct mdss_pll_resources *edp_pll_res)
+{
+	int cnt;
+	u32 status;
+
+	cnt = 100;
+	while (cnt--) {
+		udelay(100);
+		status = MDSS_PLL_REG_R(edp_pll_res->pll_base, 0xc0);
+		status &= 0x01;
+		if (status)
+			break;
+	}
+	pr_debug("cnt=%d status=%d\n", cnt, (int)status);
+
+	if (status)
+		return 1;
+
+	return 0;
+}
+
+static int edp_vco_enable(struct clk *c)
+{
+	int i, ready;
+	int rc;
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+	struct mdss_pll_resources *edp_pll_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(edp_pll_res, true);
+	if (rc) {
+		pr_err("edp pll resources not available\n");
+		return rc;
+	}
+
+	for (i = 0; i < 3; i++) {
+		ready = edp_pll_ready_poll(edp_pll_res);
+		if (ready)
+			break;
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x01);
+		udelay(100);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x05);
+		udelay(100);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x07);
+		udelay(100);
+		MDSS_PLL_REG_W(edp_pll_res->pll_base,
+					EDP_PHY_PLL_UNIPHY_PLL_GLB_CFG, 0x0f);
+		udelay(100);
+	}
+
+	if (ready) {
+		pr_debug("EDP PLL lock success\n");
+		edp_pll_res->pll_on = true;
+		rc = 0;
+	} else {
+		pr_err("EDP PLL failed to lock\n");
+		mdss_pll_resource_enable(edp_pll_res, false);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static void edp_vco_disable(struct clk *c)
+{
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+	struct mdss_pll_resources *edp_pll_res = vco->priv;
+
+	if (!edp_pll_res) {
+		pr_err("Invalid input parameter\n");
+		return;
+	}
+
+	if (!edp_pll_res->pll_on &&
+		mdss_pll_resource_enable(edp_pll_res, true)) {
+		pr_err("edp pll resources not available\n");
+		return;
+	}
+
+	MDSS_PLL_REG_W(edp_pll_res->pll_base, 0x20, 0x00);
+
+	edp_pll_res->handoff_resources = false;
+	edp_pll_res->pll_on = false;
+
+	mdss_pll_resource_enable(edp_pll_res, false);
+
+	pr_debug("EDP PLL Disabled\n");
+}
+
+static unsigned long edp_vco_get_rate(struct clk *c)
+{
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+	struct mdss_pll_resources *edp_pll_res = vco->priv;
+	u32 pll_status, div2;
+	int rc;
+
+	if (is_gdsc_disabled(edp_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(edp_pll_res, true);
+	if (rc) {
+		pr_err("edp pll resources not available\n");
+		return rc;
+	}
+
+	if (vco->rate == 0) {
+		pll_status = MDSS_PLL_REG_R(edp_pll_res->pll_base, 0xc0);
+		if (pll_status & 0x01) {
+			div2 = MDSS_PLL_REG_R(edp_pll_res->pll_base, 0x24);
+			if (div2 & 0x01)
+				vco->rate = 1350000000;
+			else
+				vco->rate = 810000000;
+		}
+	}
+	mdss_pll_resource_enable(edp_pll_res, false);
+
+	pr_debug("rate=%d\n", (int)vco->rate);
+
+	return vco->rate;
+}
+
+static long edp_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+	unsigned long rrate = -ENOENT;
+	unsigned long *lp;
+
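+	/*
+	 * Pick the smallest supported rate that is >= the request. The
+	 * rate list is zero-terminated (810 MHz, then 1.35 GHz); a request
+	 * above every entry rounds to the highest supported rate.
+	 */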
+	lp = vco->rate_list;
+	while (*lp) {
+		rrate = *lp;
+		if (rate <= rrate)
+			break;
+		lp++;
+	}
+
+	pr_debug("rrate=%d\n", (int)rrate);
+
+	return rrate;
+}
+
+static int edp_vco_prepare(struct clk *c)
+{
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+
+	pr_debug("rate=%d\n", (int)vco->rate);
+
+	return edp_vco_set_rate(c, vco->rate);
+}
+
+static void edp_vco_unprepare(struct clk *c)
+{
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+
+	pr_debug("rate=%d\n", (int)vco->rate);
+
+	edp_vco_disable(c);
+}
+
+static int edp_pll_lock_status(struct mdss_pll_resources *edp_pll_res)
+{
+	u32 status;
+	int pll_locked = 0;
+	int rc;
+
+	rc = mdss_pll_resource_enable(edp_pll_res, true);
+	if (rc) {
+		pr_err("edp pll resources not available\n");
+		return rc;
+	}
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic((edp_pll_res->pll_base + 0xc0),
+			status, ((status & BIT(0)) == 1),
+			EDP_PLL_POLL_DELAY_US,
+			EDP_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("EDP PLL status=%x failed to Lock\n", status);
+		pll_locked = 0;
+	} else {
+		pll_locked = 1;
+	}
+	mdss_pll_resource_enable(edp_pll_res, false);
+
+	return pll_locked;
+}
+
+static enum handoff edp_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct edp_pll_vco_clk *vco = to_edp_vco_clk(c);
+	struct mdss_pll_resources *edp_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(edp_pll_res))
+		return HANDOFF_DISABLED_CLK;
+
+	if (mdss_pll_resource_enable(edp_pll_res, true)) {
+		pr_err("edp pll resources not available\n");
+		return ret;
+	}
+
+	edp_pll_res->handoff_resources = true;
+
+	if (edp_pll_lock_status(edp_pll_res)) {
+		c->rate = edp_vco_get_rate(c);
+		edp_pll_res->pll_on = true;
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		edp_pll_res->handoff_resources = false;
+		mdss_pll_resource_enable(edp_pll_res, false);
+	}
+
+	pr_debug("done, ret=%d\n", ret);
+	return ret;
+}
+
+static unsigned long edp_vco_rate_list[] = {
+		810000000, 1350000000, 0};
+
+const struct clk_ops edp_vco_clk_ops = {
+	.enable = edp_vco_enable,
+	.set_rate = edp_vco_set_rate,
+	.get_rate = edp_vco_get_rate,
+	.round_rate = edp_vco_round_rate,
+	.prepare = edp_vco_prepare,
+	.unprepare = edp_vco_unprepare,
+	.handoff = edp_vco_handoff,
+};
+
+struct edp_pll_vco_clk edp_vco_clk = {
+	.ref_clk_rate = 19200000,
+	.rate = 0,
+	.rate_list = edp_vco_rate_list,
+	.c = {
+		.dbg_name = "edp_vco_clk",
+		.ops = &edp_vco_clk_ops,
+		CLK_INIT(edp_vco_clk.c),
+	},
+};
+
+static unsigned long edp_mainlink_get_rate(struct clk *c)
+{
+	struct div_clk *mclk = to_div_clk(c);
+	struct clk *pclk;
+	unsigned long rate = 0;
+
+	pclk = clk_get_parent(c);
+
+	if (pclk && pclk->ops->get_rate) {
+		rate = pclk->ops->get_rate(pclk);
+		rate /= mclk->data.div;
+	}
+
+	pr_debug("rate=%d div=%d\n", (int)rate, mclk->data.div);
+
+	return rate;
+}
+
+struct div_clk edp_mainlink_clk_src = {
+	.ops = &fixed_5div_ops,
+	.data = {
+		.div = 5,
+		.min_div = 5,
+		.max_div = 5,
+	},
+	.c = {
+		.parent = &edp_vco_clk.c,
+		.dbg_name = "edp_mainlink_clk_src",
+		.ops = &edp_mainlink_clk_src_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(edp_mainlink_clk_src.c),
+	}
+};
+
+/*
+ * This is the rate from the PLL to the clock controller (CC). The PLL
+ * output to the CC has two possibilities:
+ * 1: if the mainlink rate is 270M, then 675M
+ * 2: if the mainlink rate is 162M, then 810M
+ */
+static int edp_pixel_set_div(struct div_clk *clk, int div)
+{
+	int rc;
+	struct mdss_pll_resources *edp_pll_res = clk->priv;
+
+	rc = mdss_pll_resource_enable(edp_pll_res, true);
+	if (rc) {
+		pr_err("edp pll resources not available\n");
+		return rc;
+	}
+
+	pr_debug("div=%d\n", div);
+	MDSS_PLL_REG_W(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG, (div - 1));
+	mdss_pll_resource_enable(edp_pll_res, false);
+
+	return 0;
+}
+
+static int edp_pixel_get_div(struct div_clk *clk)
+{
+	int div = 0;
+	int rc;
+	struct mdss_pll_resources *edp_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(edp_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(edp_pll_res, true);
+	if (rc) {
+		pr_err("edp pll resources not available\n");
+		return rc;
+	}
+
+	div = MDSS_PLL_REG_R(edp_pll_res->pll_base,
+				EDP_PHY_PLL_UNIPHY_PLL_POSTDIV2_CFG);
+	mdss_pll_resource_enable(edp_pll_res, false);
+	div &= 0x01;
+	pr_debug("div=%d\n", div);
+	return div + 1;
+}
+
+static struct clk_div_ops edp_pixel_ops = {
+	.set_div = edp_pixel_set_div,
+	.get_div = edp_pixel_get_div,
+};
+
+struct div_clk edp_pixel_clk_src = {
+	.data = {
+		.max_div = 2,
+		.min_div = 1,
+	},
+	.ops = &edp_pixel_ops,
+	.c = {
+		.parent = &edp_vco_clk.c,
+		.dbg_name = "edp_pixel_clk_src",
+		.ops = &edp_pixel_clk_ops,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(edp_pixel_clk_src.c),
+	},
+};
+
+static struct clk_lookup mdss_edp_pllcc_8974[] = {
+	CLK_LOOKUP("edp_pixel_src", edp_pixel_clk_src.c,
+						"fd8c0000.qcom,mmsscc-mdss"),
+	CLK_LOOKUP("edp_mainlink_src", edp_mainlink_clk_src.c,
+						"fd8c0000.qcom,mmsscc-mdss"),
+};
+
+int edp_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP;
+
+	/* Set client data to div and vco clocks */
+	edp_pixel_clk_src.priv = pll_res;
+	edp_mainlink_clk_src.priv = pll_res;
+	edp_vco_clk.priv = pll_res;
+
+	/* Set clock operation for mainlink and pixel clock */
+	edp_mainlink_clk_src_ops = clk_ops_div;
+	edp_mainlink_clk_src_ops.get_parent = clk_get_parent;
+	edp_mainlink_clk_src_ops.get_rate = edp_mainlink_get_rate;
+
+	edp_pixel_clk_ops = clk_ops_slave_div;
+	edp_pixel_clk_ops.prepare = edp_div_prepare;
+
+	rc = of_msm_clock_register(pdev->dev.of_node, mdss_edp_pllcc_8974,
+					 ARRAY_SIZE(mdss_edp_pllcc_8974));
+	if (rc) {
+		pr_err("Clock register failed rc=%d\n", rc);
+		rc = -EPROBE_DEFER;
+	}
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-20nm.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-20nm.c
new file mode 100644
index 0000000..3894955
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-hdmi-pll-20nm.c
@@ -0,0 +1,970 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8994.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+/* hdmi phy registers */
+
+#define HDMI_PHY_CMD_SIZE  68
+#define HDMI_PHY_CLK_SIZE  97
+
+/* Set to 1 for auto KVCO cal; set to 0 for fixed value */
+#define HDMI_PHY_AUTO_KVCO_CAL    1
+
+/* PLL REGISTERS */
+#define QSERDES_COM_SYS_CLK_CTRL			(0x000)
+#define QSERDES_COM_PLL_VCOTAIL_EN			(0x004)
+#define QSERDES_COM_CMN_MODE				(0x008)
+#define QSERDES_COM_IE_TRIM				(0x00C)
+#define QSERDES_COM_IP_TRIM				(0x010)
+#define QSERDES_COM_PLL_CNTRL				(0x014)
+#define QSERDES_COM_PLL_PHSEL_CONTROL			(0x018)
+#define QSERDES_COM_IPTAT_TRIM_VCCA_TX_SEL		(0x01C)
+#define QSERDES_COM_PLL_PHSEL_DC			(0x020)
+#define QSERDES_COM_PLL_IP_SETI				(0x024)
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL		(0x028)
+#define QSERDES_COM_PLL_BKG_KVCO_CAL_EN			(0x02C)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN			(0x030)
+#define QSERDES_COM_PLL_CP_SETI				(0x034)
+#define QSERDES_COM_PLL_IP_SETP				(0x038)
+#define QSERDES_COM_PLL_CP_SETP				(0x03C)
+#define QSERDES_COM_ATB_SEL1				(0x040)
+#define QSERDES_COM_ATB_SEL2				(0x044)
+#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND		(0x048)
+#define QSERDES_COM_RESETSM_CNTRL			(0x04C)
+#define QSERDES_COM_RESETSM_CNTRL2			(0x050)
+#define QSERDES_COM_RESETSM_CNTRL3			(0x054)
+#define QSERDES_COM_RESETSM_PLL_CAL_COUNT1		(0x058)
+#define QSERDES_COM_RESETSM_PLL_CAL_COUNT2		(0x05C)
+#define QSERDES_COM_DIV_REF1				(0x060)
+#define QSERDES_COM_DIV_REF2				(0x064)
+#define QSERDES_COM_KVCO_COUNT1				(0x068)
+#define QSERDES_COM_KVCO_COUNT2				(0x06C)
+#define QSERDES_COM_KVCO_CAL_CNTRL			(0x070)
+#define QSERDES_COM_KVCO_CODE				(0x074)
+#define QSERDES_COM_VREF_CFG1				(0x078)
+#define QSERDES_COM_VREF_CFG2				(0x07C)
+#define QSERDES_COM_VREF_CFG3				(0x080)
+#define QSERDES_COM_VREF_CFG4				(0x084)
+#define QSERDES_COM_VREF_CFG5				(0x088)
+#define QSERDES_COM_VREF_CFG6				(0x08C)
+#define QSERDES_COM_PLLLOCK_CMP1			(0x090)
+#define QSERDES_COM_PLLLOCK_CMP2			(0x094)
+#define QSERDES_COM_PLLLOCK_CMP3			(0x098)
+#define QSERDES_COM_PLLLOCK_CMP_EN			(0x09C)
+#define QSERDES_COM_BGTC				(0x0A0)
+#define QSERDES_COM_PLL_TEST_UPDN			(0x0A4)
+#define QSERDES_COM_PLL_VCO_TUNE			(0x0A8)
+#define QSERDES_COM_DEC_START1				(0x0AC)
+#define QSERDES_COM_PLL_AMP_OS				(0x0B0)
+#define QSERDES_COM_SSC_EN_CENTER			(0x0B4)
+#define QSERDES_COM_SSC_ADJ_PER1			(0x0B8)
+#define QSERDES_COM_SSC_ADJ_PER2			(0x0BC)
+#define QSERDES_COM_SSC_PER1				(0x0C0)
+#define QSERDES_COM_SSC_PER2				(0x0C4)
+#define QSERDES_COM_SSC_STEP_SIZE1			(0x0C8)
+#define QSERDES_COM_SSC_STEP_SIZE2			(0x0CC)
+#define QSERDES_COM_RES_CODE_UP				(0x0D0)
+#define QSERDES_COM_RES_CODE_DN				(0x0D4)
+#define QSERDES_COM_RES_CODE_UP_OFFSET			(0x0D8)
+#define QSERDES_COM_RES_CODE_DN_OFFSET			(0x0DC)
+#define QSERDES_COM_RES_CODE_START_SEG1			(0x0E0)
+#define QSERDES_COM_RES_CODE_START_SEG2			(0x0E4)
+#define QSERDES_COM_RES_CODE_CAL_CSR			(0x0E8)
+#define QSERDES_COM_RES_CODE				(0x0EC)
+#define QSERDES_COM_RES_TRIM_CONTROL			(0x0F0)
+#define QSERDES_COM_RES_TRIM_CONTROL2			(0x0F4)
+#define QSERDES_COM_RES_TRIM_EN_VCOCALDONE		(0x0F8)
+#define QSERDES_COM_FAUX_EN				(0x0FC)
+#define QSERDES_COM_DIV_FRAC_START1			(0x100)
+#define QSERDES_COM_DIV_FRAC_START2			(0x104)
+#define QSERDES_COM_DIV_FRAC_START3			(0x108)
+#define QSERDES_COM_DEC_START2				(0x10C)
+#define QSERDES_COM_PLL_RXTXEPCLK_EN			(0x110)
+#define QSERDES_COM_PLL_CRCTRL				(0x114)
+#define QSERDES_COM_PLL_CLKEPDIV			(0x118)
+#define QSERDES_COM_PLL_FREQUPDATE			(0x11C)
+#define QSERDES_COM_PLL_BKGCAL_TRIM_UP			(0x120)
+#define QSERDES_COM_PLL_BKGCAL_TRIM_DN			(0x124)
+#define QSERDES_COM_PLL_BKGCAL_TRIM_MUX			(0x128)
+#define QSERDES_COM_PLL_BKGCAL_VREF_CFG			(0x12C)
+#define QSERDES_COM_PLL_BKGCAL_DIV_REF1			(0x130)
+#define QSERDES_COM_PLL_BKGCAL_DIV_REF2			(0x134)
+#define QSERDES_COM_MUXADDR				(0x138)
+#define QSERDES_COM_LOW_POWER_RO_CONTROL		(0x13C)
+#define QSERDES_COM_POST_DIVIDER_CONTROL		(0x140)
+#define QSERDES_COM_HR_OCLK2_DIVIDER			(0x144)
+#define QSERDES_COM_HR_OCLK3_DIVIDER			(0x148)
+#define QSERDES_COM_PLL_VCO_HIGH			(0x14C)
+#define QSERDES_COM_RESET_SM				(0x150)
+#define QSERDES_COM_MUXVAL				(0x154)
+#define QSERDES_COM_CORE_RES_CODE_DN			(0x158)
+#define QSERDES_COM_CORE_RES_CODE_UP			(0x15C)
+#define QSERDES_COM_CORE_VCO_TUNE			(0x160)
+#define QSERDES_COM_CORE_VCO_TAIL			(0x164)
+#define QSERDES_COM_CORE_KVCO_CODE			(0x168)
+
+/* Tx Channel 0 REGISTERS */
+#define QSERDES_TX_L0_BIST_MODE_LANENO			(0x00)
+#define QSERDES_TX_L0_CLKBUF_ENABLE			(0x04)
+#define QSERDES_TX_L0_TX_EMP_POST1_LVL			(0x08)
+#define QSERDES_TX_L0_TX_DRV_LVL			(0x0C)
+#define QSERDES_TX_L0_RESET_TSYNC_EN			(0x10)
+#define QSERDES_TX_L0_LPB_EN				(0x14)
+#define QSERDES_TX_L0_RES_CODE_UP			(0x18)
+#define QSERDES_TX_L0_RES_CODE_DN			(0x1C)
+#define QSERDES_TX_L0_PERL_LENGTH1			(0x20)
+#define QSERDES_TX_L0_PERL_LENGTH2			(0x24)
+#define QSERDES_TX_L0_SERDES_BYP_EN_OUT			(0x28)
+#define QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
+#define QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN	(0x30)
+#define QSERDES_TX_L0_BIST_PATTERN1			(0x34)
+#define QSERDES_TX_L0_BIST_PATTERN2			(0x38)
+#define QSERDES_TX_L0_BIST_PATTERN3			(0x3C)
+#define QSERDES_TX_L0_BIST_PATTERN4			(0x40)
+#define QSERDES_TX_L0_BIST_PATTERN5			(0x44)
+#define QSERDES_TX_L0_BIST_PATTERN6			(0x48)
+#define QSERDES_TX_L0_BIST_PATTERN7			(0x4C)
+#define QSERDES_TX_L0_BIST_PATTERN8			(0x50)
+#define QSERDES_TX_L0_LANE_MODE				(0x54)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE		(0x58)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
+#define QSERDES_TX_L0_ATB_SEL1				(0x60)
+#define QSERDES_TX_L0_ATB_SEL2				(0x64)
+#define QSERDES_TX_L0_RCV_DETECT_LVL			(0x68)
+#define QSERDES_TX_L0_PRBS_SEED1			(0x6C)
+#define QSERDES_TX_L0_PRBS_SEED2			(0x70)
+#define QSERDES_TX_L0_PRBS_SEED3			(0x74)
+#define QSERDES_TX_L0_PRBS_SEED4			(0x78)
+#define QSERDES_TX_L0_RESET_GEN				(0x7C)
+#define QSERDES_TX_L0_TRAN_DRVR_EMP_EN			(0x80)
+#define QSERDES_TX_L0_TX_INTERFACE_MODE			(0x84)
+#define QSERDES_TX_L0_PWM_CTRL				(0x88)
+#define QSERDES_TX_L0_PWM_DATA				(0x8C)
+#define QSERDES_TX_L0_PWM_ENC_DIV_CTRL			(0x90)
+#define QSERDES_TX_L0_VMODE_CTRL1			(0x94)
+#define QSERDES_TX_L0_VMODE_CTRL2			(0x98)
+#define QSERDES_TX_L0_VMODE_CTRL3			(0x9C)
+#define QSERDES_TX_L0_VMODE_CTRL4			(0xA0)
+#define QSERDES_TX_L0_VMODE_CTRL5			(0xA4)
+#define QSERDES_TX_L0_VMODE_CTRL6			(0xA8)
+#define QSERDES_TX_L0_VMODE_CTRL7			(0xAC)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
+#define QSERDES_TX_L0_BIST_STATUS			(0xB4)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT1			(0xB8)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT2			(0xBC)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV			(0xC0)
+#define QSERDES_TX_L0_PWM_DEC_STATUS			(0xC4)
+
+/* Tx Channel 1 REGISTERS */
+#define QSERDES_TX_L1_BIST_MODE_LANENO			(0x00)
+#define QSERDES_TX_L1_CLKBUF_ENABLE			(0x04)
+#define QSERDES_TX_L1_TX_EMP_POST1_LVL			(0x08)
+#define QSERDES_TX_L1_TX_DRV_LVL			(0x0C)
+#define QSERDES_TX_L1_RESET_TSYNC_EN			(0x10)
+#define QSERDES_TX_L1_LPB_EN				(0x14)
+#define QSERDES_TX_L1_RES_CODE_UP			(0x18)
+#define QSERDES_TX_L1_RES_CODE_DN			(0x1C)
+#define QSERDES_TX_L1_PERL_LENGTH1			(0x20)
+#define QSERDES_TX_L1_PERL_LENGTH2			(0x24)
+#define QSERDES_TX_L1_SERDES_BYP_EN_OUT			(0x28)
+#define QSERDES_TX_L1_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
+#define QSERDES_TX_L1_PARRATE_REC_DETECT_IDLE_EN	(0x30)
+#define QSERDES_TX_L1_BIST_PATTERN1			(0x34)
+#define QSERDES_TX_L1_BIST_PATTERN2			(0x38)
+#define QSERDES_TX_L1_BIST_PATTERN3			(0x3C)
+#define QSERDES_TX_L1_BIST_PATTERN4			(0x40)
+#define QSERDES_TX_L1_BIST_PATTERN5			(0x44)
+#define QSERDES_TX_L1_BIST_PATTERN6			(0x48)
+#define QSERDES_TX_L1_BIST_PATTERN7			(0x4C)
+#define QSERDES_TX_L1_BIST_PATTERN8			(0x50)
+#define QSERDES_TX_L1_LANE_MODE				(0x54)
+#define QSERDES_TX_L1_IDAC_CAL_LANE_MODE		(0x58)
+#define QSERDES_TX_L1_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
+#define QSERDES_TX_L1_ATB_SEL1				(0x60)
+#define QSERDES_TX_L1_ATB_SEL2				(0x64)
+#define QSERDES_TX_L1_RCV_DETECT_LVL			(0x68)
+#define QSERDES_TX_L1_PRBS_SEED1			(0x6C)
+#define QSERDES_TX_L1_PRBS_SEED2			(0x70)
+#define QSERDES_TX_L1_PRBS_SEED3			(0x74)
+#define QSERDES_TX_L1_PRBS_SEED4			(0x78)
+#define QSERDES_TX_L1_RESET_GEN				(0x7C)
+#define QSERDES_TX_L1_TRAN_DRVR_EMP_EN			(0x80)
+#define QSERDES_TX_L1_TX_INTERFACE_MODE			(0x84)
+#define QSERDES_TX_L1_PWM_CTRL				(0x88)
+#define QSERDES_TX_L1_PWM_DATA				(0x8C)
+#define QSERDES_TX_L1_PWM_ENC_DIV_CTRL			(0x90)
+#define QSERDES_TX_L1_VMODE_CTRL1			(0x94)
+#define QSERDES_TX_L1_VMODE_CTRL2			(0x98)
+#define QSERDES_TX_L1_VMODE_CTRL3			(0x9C)
+#define QSERDES_TX_L1_VMODE_CTRL4			(0xA0)
+#define QSERDES_TX_L1_VMODE_CTRL5			(0xA4)
+#define QSERDES_TX_L1_VMODE_CTRL6			(0xA8)
+#define QSERDES_TX_L1_VMODE_CTRL7			(0xAC)
+#define QSERDES_TX_L1_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
+#define QSERDES_TX_L1_BIST_STATUS			(0xB4)
+#define QSERDES_TX_L1_BIST_ERROR_COUNT1			(0xB8)
+#define QSERDES_TX_L1_BIST_ERROR_COUNT2			(0xBC)
+#define QSERDES_TX_L1_TX_ALOG_INTF_OBSV			(0xC0)
+#define QSERDES_TX_L1_PWM_DEC_STATUS			(0xC4)
+
+/* Tx Channel 2 REGISTERS */
+#define QSERDES_TX_L2_BIST_MODE_LANENO			(0x00)
+#define QSERDES_TX_L2_CLKBUF_ENABLE			(0x04)
+#define QSERDES_TX_L2_TX_EMP_POST1_LVL			(0x08)
+#define QSERDES_TX_L2_TX_DRV_LVL			(0x0C)
+#define QSERDES_TX_L2_RESET_TSYNC_EN			(0x10)
+#define QSERDES_TX_L2_LPB_EN				(0x14)
+#define QSERDES_TX_L2_RES_CODE_UP			(0x18)
+#define QSERDES_TX_L2_RES_CODE_DN			(0x1C)
+#define QSERDES_TX_L2_PERL_LENGTH1			(0x20)
+#define QSERDES_TX_L2_PERL_LENGTH2			(0x24)
+#define QSERDES_TX_L2_SERDES_BYP_EN_OUT			(0x28)
+#define QSERDES_TX_L2_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
+#define QSERDES_TX_L2_PARRATE_REC_DETECT_IDLE_EN	(0x30)
+#define QSERDES_TX_L2_BIST_PATTERN1			(0x34)
+#define QSERDES_TX_L2_BIST_PATTERN2			(0x38)
+#define QSERDES_TX_L2_BIST_PATTERN3			(0x3C)
+#define QSERDES_TX_L2_BIST_PATTERN4			(0x40)
+#define QSERDES_TX_L2_BIST_PATTERN5			(0x44)
+#define QSERDES_TX_L2_BIST_PATTERN6			(0x48)
+#define QSERDES_TX_L2_BIST_PATTERN7			(0x4C)
+#define QSERDES_TX_L2_BIST_PATTERN8			(0x50)
+#define QSERDES_TX_L2_LANE_MODE				(0x54)
+#define QSERDES_TX_L2_IDAC_CAL_LANE_MODE		(0x58)
+#define QSERDES_TX_L2_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
+#define QSERDES_TX_L2_ATB_SEL1				(0x60)
+#define QSERDES_TX_L2_ATB_SEL2				(0x64)
+#define QSERDES_TX_L2_RCV_DETECT_LVL			(0x68)
+#define QSERDES_TX_L2_PRBS_SEED1			(0x6C)
+#define QSERDES_TX_L2_PRBS_SEED2			(0x70)
+#define QSERDES_TX_L2_PRBS_SEED3			(0x74)
+#define QSERDES_TX_L2_PRBS_SEED4			(0x78)
+#define QSERDES_TX_L2_RESET_GEN				(0x7C)
+#define QSERDES_TX_L2_TRAN_DRVR_EMP_EN			(0x80)
+#define QSERDES_TX_L2_TX_INTERFACE_MODE			(0x84)
+#define QSERDES_TX_L2_PWM_CTRL				(0x88)
+#define QSERDES_TX_L2_PWM_DATA				(0x8C)
+#define QSERDES_TX_L2_PWM_ENC_DIV_CTRL			(0x90)
+#define QSERDES_TX_L2_VMODE_CTRL1			(0x94)
+#define QSERDES_TX_L2_VMODE_CTRL2			(0x98)
+#define QSERDES_TX_L2_VMODE_CTRL3			(0x9C)
+#define QSERDES_TX_L2_VMODE_CTRL4			(0xA0)
+#define QSERDES_TX_L2_VMODE_CTRL5			(0xA4)
+#define QSERDES_TX_L2_VMODE_CTRL6			(0xA8)
+#define QSERDES_TX_L2_VMODE_CTRL7			(0xAC)
+#define QSERDES_TX_L2_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
+#define QSERDES_TX_L2_BIST_STATUS			(0xB4)
+#define QSERDES_TX_L2_BIST_ERROR_COUNT1			(0xB8)
+#define QSERDES_TX_L2_BIST_ERROR_COUNT2			(0xBC)
+#define QSERDES_TX_L2_TX_ALOG_INTF_OBSV			(0xC0)
+#define QSERDES_TX_L2_PWM_DEC_STATUS			(0xC4)
+
+/* Tx Channel 3 REGISTERS */
+#define QSERDES_TX_L3_BIST_MODE_LANENO			(0x00)
+#define QSERDES_TX_L3_CLKBUF_ENABLE			(0x04)
+#define QSERDES_TX_L3_TX_EMP_POST1_LVL			(0x08)
+#define QSERDES_TX_L3_TX_DRV_LVL			(0x0C)
+#define QSERDES_TX_L3_RESET_TSYNC_EN			(0x10)
+#define QSERDES_TX_L3_LPB_EN				(0x14)
+#define QSERDES_TX_L3_RES_CODE_UP			(0x18)
+#define QSERDES_TX_L3_RES_CODE_DN			(0x1C)
+#define QSERDES_TX_L3_PERL_LENGTH1			(0x20)
+#define QSERDES_TX_L3_PERL_LENGTH2			(0x24)
+#define QSERDES_TX_L3_SERDES_BYP_EN_OUT			(0x28)
+#define QSERDES_TX_L3_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN	(0x2C)
+#define QSERDES_TX_L3_PARRATE_REC_DETECT_IDLE_EN	(0x30)
+#define QSERDES_TX_L3_BIST_PATTERN1			(0x34)
+#define QSERDES_TX_L3_BIST_PATTERN2			(0x38)
+#define QSERDES_TX_L3_BIST_PATTERN3			(0x3C)
+#define QSERDES_TX_L3_BIST_PATTERN4			(0x40)
+#define QSERDES_TX_L3_BIST_PATTERN5			(0x44)
+#define QSERDES_TX_L3_BIST_PATTERN6			(0x48)
+#define QSERDES_TX_L3_BIST_PATTERN7			(0x4C)
+#define QSERDES_TX_L3_BIST_PATTERN8			(0x50)
+#define QSERDES_TX_L3_LANE_MODE				(0x54)
+#define QSERDES_TX_L3_IDAC_CAL_LANE_MODE		(0x58)
+#define QSERDES_TX_L3_IDAC_CAL_LANE_MODE_CONFIGURATION	(0x5C)
+#define QSERDES_TX_L3_ATB_SEL1				(0x60)
+#define QSERDES_TX_L3_ATB_SEL2				(0x64)
+#define QSERDES_TX_L3_RCV_DETECT_LVL			(0x68)
+#define QSERDES_TX_L3_PRBS_SEED1			(0x6C)
+#define QSERDES_TX_L3_PRBS_SEED2			(0x70)
+#define QSERDES_TX_L3_PRBS_SEED3			(0x74)
+#define QSERDES_TX_L3_PRBS_SEED4			(0x78)
+#define QSERDES_TX_L3_RESET_GEN				(0x7C)
+#define QSERDES_TX_L3_TRAN_DRVR_EMP_EN			(0x80)
+#define QSERDES_TX_L3_TX_INTERFACE_MODE			(0x84)
+#define QSERDES_TX_L3_PWM_CTRL				(0x88)
+#define QSERDES_TX_L3_PWM_DATA				(0x8C)
+#define QSERDES_TX_L3_PWM_ENC_DIV_CTRL			(0x90)
+#define QSERDES_TX_L3_VMODE_CTRL1			(0x94)
+#define QSERDES_TX_L3_VMODE_CTRL2			(0x98)
+#define QSERDES_TX_L3_VMODE_CTRL3			(0x9C)
+#define QSERDES_TX_L3_VMODE_CTRL4			(0xA0)
+#define QSERDES_TX_L3_VMODE_CTRL5			(0xA4)
+#define QSERDES_TX_L3_VMODE_CTRL6			(0xA8)
+#define QSERDES_TX_L3_VMODE_CTRL7			(0xAC)
+#define QSERDES_TX_L3_TX_ALOG_INTF_OBSV_CNTL		(0xB0)
+#define QSERDES_TX_L3_BIST_STATUS			(0xB4)
+#define QSERDES_TX_L3_BIST_ERROR_COUNT1			(0xB8)
+#define QSERDES_TX_L3_BIST_ERROR_COUNT2			(0xBC)
+#define QSERDES_TX_L3_TX_ALOG_INTF_OBSV			(0xC0)
+#define QSERDES_TX_L3_PWM_DEC_STATUS			(0xC4)
+
+/* HDMI PHY REGISTERS */
+#define HDMI_PHY_CFG					(0x00)
+#define HDMI_PHY_PD_CTL					(0x04)
+#define HDMI_PHY_MODE					(0x08)
+#define HDMI_PHY_MISR_CLEAR				(0x0C)
+#define HDMI_PHY_TX0_TX1_BIST_CFG0			(0x10)
+#define HDMI_PHY_TX0_TX1_BIST_CFG1			(0x14)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE0		(0x18)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE1		(0x1C)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE2		(0x20)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE3		(0x24)
+#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE0		(0x28)
+#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE1		(0x2C)
+#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE2		(0x30)
+#define HDMI_PHY_TX0_TX1_PRBS_POLY_BYTE3		(0x34)
+#define HDMI_PHY_TX2_TX3_BIST_CFG0			(0x38)
+#define HDMI_PHY_TX2_TX3_BIST_CFG1			(0x3C)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE0		(0x40)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE1		(0x44)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE2		(0x48)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE3		(0x4C)
+#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE0		(0x50)
+#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE1		(0x54)
+#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE2		(0x58)
+#define HDMI_PHY_TX2_TX3_PRBS_POLY_BYTE3		(0x5C)
+#define HDMI_PHY_DEBUG_BUS_SEL				(0x60)
+#define HDMI_PHY_TXCAL_CFG0				(0x64)
+#define HDMI_PHY_TXCAL_CFG1				(0x68)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS0			(0x6C)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS1			(0x70)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS2			(0x74)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS0			(0x78)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS1			(0x7C)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS2			(0x80)
+#define HDMI_PHY_PRE_MISR_STATUS0			(0x84)
+#define HDMI_PHY_PRE_MISR_STATUS1			(0x88)
+#define HDMI_PHY_PRE_MISR_STATUS2			(0x8C)
+#define HDMI_PHY_PRE_MISR_STATUS3			(0x90)
+#define HDMI_PHY_POST_MISR_STATUS0			(0x94)
+#define HDMI_PHY_POST_MISR_STATUS1			(0x98)
+#define HDMI_PHY_POST_MISR_STATUS2			(0x9C)
+#define HDMI_PHY_POST_MISR_STATUS3			(0xA0)
+#define HDMI_PHY_STATUS					(0xA4)
+#define HDMI_PHY_MISC3_STATUS				(0xA8)
+#define HDMI_PHY_DEBUG_BUS0				(0xAC)
+#define HDMI_PHY_DEBUG_BUS1				(0xB0)
+#define HDMI_PHY_DEBUG_BUS2				(0xB4)
+#define HDMI_PHY_DEBUG_BUS3				(0xB8)
+#define HDMI_PHY_REVISION_ID0				(0xBC)
+#define HDMI_PHY_REVISION_ID1				(0xC0)
+#define HDMI_PHY_REVISION_ID2				(0xC4)
+#define HDMI_PHY_REVISION_ID3				(0xC8)
+
+#define HDMI_PLL_POLL_DELAY_US			50
+#define HDMI_PLL_POLL_TIMEOUT_US		125000
+#define HDMI_PLL_REF_CLK_RATE			192ULL
+#define HDMI_PLL_DIVISOR			10000000000ULL
+#define HDMI_PLL_DIVISOR_32			100000U
+#define HDMI_PLL_MIN_VCO_CLK			160000000ULL
+#define HDMI_PLL_TMDS_MAX			800000000U
+
+static int hdmi_20nm_pll_lock_status(struct mdss_pll_resources *io)
+{
+	u32 status;
+	int pll_locked = 0;
+	int phy_ready = 0;
+	int rc;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	/* Poll for C_READY and PHY READY */
+	pr_debug("%s: Waiting for PHY Ready\n", __func__);
+
+	/* poll for PLL ready status */
+	if (!readl_poll_timeout_atomic(
+		(io->pll_base + QSERDES_COM_RESET_SM),
+		status, ((status & BIT(6)) == 1),
+		HDMI_PLL_POLL_DELAY_US,
+		HDMI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("%s: C READY\n", __func__);
+		pll_locked = 1;
+	} else {
+		pr_debug("%s: C READY TIMEOUT\n", __func__);
+		pll_locked = 0;
+	}
+
+	/* poll for PHY ready status */
+	if (pll_locked && !readl_poll_timeout_atomic(
+		(io->phy_base + HDMI_PHY_STATUS),
+		status, ((status & BIT(0)) == 1),
+		HDMI_PLL_POLL_DELAY_US,
+		HDMI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("%s: PHY READY\n", __func__);
+		phy_ready = 1;
+	} else {
+		pr_debug("%s: PHY READY TIMEOUT\n", __func__);
+		phy_ready = 0;
+	}
+	mdss_pll_resource_enable(io, false);
+
+	return phy_ready;
+}
+
+static inline struct hdmi_pll_vco_clk *to_hdmi_20nm_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
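+/*
+ * Look up a per-band register value: the {vco_rate, reg} tables in
+ * hdmi_20nm_vco_clk are sorted by rate and terminated with an entry at
+ * HDMI_PLL_TMDS_MAX, so the first entry whose bound covers tmds_clk is
+ * used and the terminator doubles as the default value.
+ */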
+static inline u32 hdmi_20nm_phy_pll_vco_reg_val(struct hdmi_pll_cfg *pll_cfg,
+								u32 tmds_clk)
+{
+	u32 index = 0;
+
+	while (pll_cfg[index].vco_rate < HDMI_PLL_TMDS_MAX &&
+					pll_cfg[index].vco_rate < tmds_clk)
+		index++;
+	return pll_cfg[index].reg;
+}
+
+static void hdmi_20nm_phy_pll_calc_settings(struct mdss_pll_resources *io,
+			struct hdmi_pll_vco_clk *vco, u32 vco_clk, u32 tmds_clk)
+{
+	u32 val = 0;
+	u64 dec_start_val, frac_start_val, pll_lock_cmp;
+
+	/* Calculate decimal and fractional values */
+	dec_start_val = 1000000UL * vco_clk;
+	do_div(dec_start_val, HDMI_PLL_REF_CLK_RATE);
+	do_div(dec_start_val, 2U);
+	frac_start_val = dec_start_val;
+	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
+	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
+	frac_start_val *= HDMI_PLL_DIVISOR;
+	frac_start_val = dec_start_val - frac_start_val;
+	frac_start_val *= (u64)(2 << 19);
+	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
+	do_div(frac_start_val, HDMI_PLL_DIVISOR_32);
+	pll_lock_cmp = dec_start_val;
+	do_div(pll_lock_cmp, 10U);
+	pll_lock_cmp *= 0x800;
+	do_div(pll_lock_cmp, HDMI_PLL_DIVISOR_32);
+	do_div(pll_lock_cmp, HDMI_PLL_DIVISOR_32);
+	pll_lock_cmp -= 1U;
+	do_div(dec_start_val, HDMI_PLL_DIVISOR_32);
+	do_div(dec_start_val, HDMI_PLL_DIVISOR_32);
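+	/*
+	 * The divisions above are 64-bit scaled-integer arithmetic:
+	 * do_div() only accepts a 32-bit divisor, so the 10^10 scale
+	 * factor (HDMI_PLL_DIVISOR) is removed as two successive divisions
+	 * by HDMI_PLL_DIVISOR_32 (10^5). This yields the integer decimal
+	 * start, the 2^20-scaled fractional start and the lock-comparison
+	 * threshold without any floating point.
+	 */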
+
+	/* PLL loop bandwidth */
+	val = hdmi_20nm_phy_pll_vco_reg_val(vco->ip_seti, tmds_clk);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IP_SETI, val);
+	val = hdmi_20nm_phy_pll_vco_reg_val(vco->cp_seti, tmds_clk);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CP_SETI, val);
+	val = hdmi_20nm_phy_pll_vco_reg_val(vco->cp_setp, tmds_clk);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CP_SETP, val);
+	val = hdmi_20nm_phy_pll_vco_reg_val(vco->ip_setp, tmds_clk);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IP_SETP, val);
+	val = hdmi_20nm_phy_pll_vco_reg_val(vco->crctrl, tmds_clk);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CRCTRL, val);
+
+	/* PLL calibration */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START1,
+		0x80 | (frac_start_val & 0x7F));
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START2,
+		0x80 | ((frac_start_val >> 7) & 0x7F));
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START3,
+		0x40 | ((frac_start_val >> 14) & 0x3F));
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START1,
+		0x80 | (dec_start_val & 0x7F));
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START2,
+		0x02 | (0x01 & (dec_start_val >> 7)));
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP1,
+		pll_lock_cmp & 0xFF);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP2,
+		(pll_lock_cmp >> 8) & 0xFF);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP3,
+		(pll_lock_cmp >> 16) & 0xFF);
+}
+
+static u32 hdmi_20nm_phy_pll_set_clk_rate(struct clk *c, u32 tmds_clk)
+{
+	u32 tx_band = 0;
+
+	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	u64 vco_clk = tmds_clk;
+
+	while (vco_clk > 0 && vco_clk < HDMI_PLL_MIN_VCO_CLK) {
+		tx_band++;
+		vco_clk *= 2;
+	}
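+	/*
+	 * tx_band counts how many times the TMDS rate must be doubled to
+	 * bring the VCO above its 160 MHz minimum. For example, a 27 MHz
+	 * TMDS clock is doubled three times: vco_clk = 216 MHz with
+	 * tx_band = 3.
+	 */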
+
+	/* Initially shut down PHY */
+	pr_debug("%s: Disabling PHY\n", __func__);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x0);
+	udelay(1000);
+	/* memory barrier */
+	mb();
+
+	/* power-up and recommended common block settings */
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1F);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x01);
+	udelay(1000);
+	/* memory barrier */
+	mb();
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x07);
+	udelay(1000);
+	/* memory barrier */
+	mb();
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x05);
+	udelay(1000);
+	/* memory barrier */
+	mb();
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x42);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_VCOTAIL_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MODE, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_IE_TRIM, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_IP_TRIM, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CNTRL, 0x07);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_PHSEL_CONTROL, 0x04);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_IPTAT_TRIM_VCCA_TX_SEL, 0xA0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_PHSEL_DC, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORE_CLK_IN_SYNC_SEL, 0x00);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_BKG_KVCO_CAL_EN, 0x00);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x01);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x01);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_EN_SEL_TXBAND,
+		0x4A + (0x10 * tx_band));
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG1, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG2, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BGTC, 0xFF);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_TEST_UPDN, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_VCO_TUNE, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_AMP_OS, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_EN_CENTER, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_UP, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_DN, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_KVCO_CODE,
+		tmds_clk > 300000000 ? 0x3F : 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_KVCO_COUNT1,
+		tmds_clk > 300000000 ? 0x00 : 0x8A);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_REF1, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_REF2,
+		tmds_clk > 300000000 ? 0x00 : 0x01);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_KVCO_CAL_CNTRL,
+		tmds_clk > 300000000 ? 0x00 : 0x1F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG3,
+		tmds_clk > 300000000 ? 0x00 : 0x40);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG4, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VREF_CFG5, 0x10);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL,
+		tmds_clk > 300000000 ? 0x80 : 0x00);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_CAL_CSR, 0x77);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_TRIM_EN_VCOCALDONE, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_RXTXEPCLK_EN, 0x0C);
+
+	hdmi_20nm_phy_pll_calc_settings(io, vco, vco_clk, tmds_clk);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLLLOCK_CMP_EN, 0x11);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CNTRL, 0x07);
+
+	/* Resistor calibration linear search */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_START_SEG1, 0x60);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_CODE_START_SEG2, 0x60);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RES_TRIM_CONTROL, 0x01);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL2, 0x07);
+
+	udelay(1000);
+	/* memory barrier */
+	mb();
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_MODE, tx_band);
+
+	/* TX lanes (transceivers) power-up sequence */
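+	/*
+	 * Each TX lane has an identical register block offset from
+	 * pll_base: L0 at +0x400, L1 at +0x600, L2 at +0x800 and L3 at
+	 * +0xA00 (0x200 stride), with the same QSERDES_TX_L* offsets
+	 * applied within each block.
+	 */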
+	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + 0x600, QSERDES_TX_L1_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + 0xA00, QSERDES_TX_L3_CLKBUF_ENABLE, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + 0x400,
+		QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + 0x600,
+		QSERDES_TX_L1_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + 0x800,
+		QSERDES_TX_L2_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + 0xA00,
+		QSERDES_TX_L3_TRAN_DRVR_EMP_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + 0x400,
+		QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
+	MDSS_PLL_REG_W(io->pll_base + 0x600,
+		QSERDES_TX_L1_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
+	MDSS_PLL_REG_W(io->pll_base + 0x800,
+		QSERDES_TX_L2_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
+	MDSS_PLL_REG_W(io->pll_base + 0xA00,
+		QSERDES_TX_L3_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN, 0x6F);
+
+	MDSS_PLL_REG_W(io->pll_base + 0x400,
+		QSERDES_TX_L0_TX_EMP_POST1_LVL, 0x0000002F);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG0, 0x000000AF);
+
+	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_VMODE_CTRL1, 0x08);
+	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_VMODE_CTRL1, 0x09);
+	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_VMODE_CTRL5, 0xA0);
+	MDSS_PLL_REG_W(io->pll_base + 0x400, QSERDES_TX_L0_VMODE_CTRL6, 0x01);
+	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_VMODE_CTRL5, 0xA0);
+	MDSS_PLL_REG_W(io->pll_base + 0x800, QSERDES_TX_L2_VMODE_CTRL6, 0x01);
+
+	MDSS_PLL_REG_W(io->pll_base + 0x400,
+		QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + 0x400,
+		QSERDES_TX_L0_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + 0x600,
+		QSERDES_TX_L1_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + 0x600,
+		QSERDES_TX_L1_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + 0x800,
+		QSERDES_TX_L2_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + 0x800,
+		QSERDES_TX_L2_TX_INTERFACE_MODE, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + 0xA00,
+		QSERDES_TX_L3_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + 0xA00,
+		QSERDES_TX_L3_TX_INTERFACE_MODE, 0x00);
+
+	return 0;
+}
+
+static int hdmi_20nm_vco_enable(struct clk *c)
+{
+	u32 ready_poll;
+	u32 time_out_loop;
+	/* Hardware recommended timeout iterator */
+	u32 time_out_max = 50000;
+
+	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x00000000);
+	udelay(100);
+	/* memory barrier */
+	mb();
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x00000003);
+	udelay(100);
+	/* memory barrier */
+	mb();
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x00000009);
+	udelay(100);
+	/* memory barrier */
+	mb();
+
+	/* Poll for C_READY and PHY READY */
+	pr_debug("%s: Waiting for PHY Ready\n", __func__);
+	time_out_loop = 0;
+	do {
+		ready_poll = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_RESET_SM);
+		time_out_loop++;
+		udelay(10);
+	} while (((ready_poll  & (1 << 6)) == 0) &&
+		(time_out_loop < time_out_max));
+	if (time_out_loop >= time_out_max)
+		pr_err("%s: ERROR: TIMED OUT BEFORE C READY\n", __func__);
+	else
+		pr_debug("%s: C READY\n", __func__);
+
+	/* Poll for PHY READY */
+	pr_debug("%s: Waiting for PHY Ready\n", __func__);
+	time_out_loop = 0;
+	do {
+		ready_poll = MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS);
+		time_out_loop++;
+		udelay(1);
+	} while (((ready_poll & 0x1) == 0) && (time_out_loop < time_out_max));
+
+	if (time_out_loop >= time_out_max)
+		pr_err("%s: TIMED OUT BEFORE PHY READY\n", __func__);
+	else
+		pr_debug("%s: HDMI PHY READY\n", __func__);
+
+	io->pll_on = true;
+
+	return 0;
+}
+
+static int hdmi_20nm_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	unsigned int set_power_dwn = 0;
+	int rc;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	if (io->pll_on)
+		set_power_dwn = 1;
+
+	pll_base = io->pll_base;
+	phy_base = io->phy_base;
+
+	pr_debug("rate=%ld\n", rate);
+
+	hdmi_20nm_phy_pll_set_clk_rate(c, rate);
+
+	mdss_pll_resource_enable(io, false);
+
+	if (set_power_dwn)
+		hdmi_20nm_vco_enable(c);
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+	return 0;
+}
+
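+/*
+ * Rate read-back is not implemented for the 20nm PLL: this only verifies
+ * that the PLL resources are accessible and reports 0, leaving the rate
+ * cached by set_rate as the authoritative value.
+ */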
+static unsigned long hdmi_20nm_vco_get_rate(struct clk *c)
+{
+	unsigned long freq = 0;
+	int rc;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (is_gdsc_disabled(io))
+		return 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	return freq;
+}
+
+static long hdmi_20nm_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+
+	pr_debug("rrate=%ld\n", rrate);
+
+	return rrate;
+}
+
+static int hdmi_20nm_vco_prepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int ret = 0;
+
+	pr_debug("rate=%ld\n", vco->rate);
+
+	if (!vco->rate_set && vco->rate)
+		ret = hdmi_20nm_vco_set_rate(c, vco->rate);
+
+	if (!ret) {
+		ret = mdss_pll_resource_enable(io, true);
+		if (ret)
+			pr_err("pll resource can't be enabled\n");
+	}
+
+	return ret;
+}
+
+static void hdmi_20nm_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	vco->rate_set = false;
+
+	if (!io) {
+		pr_err("Invalid input parameter\n");
+		return;
+	}
+
+	if (!io->pll_on &&
+		mdss_pll_resource_enable(io, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return;
+	}
+
+	io->handoff_resources = false;
+	mdss_pll_resource_enable(io, false);
+	io->pll_on = false;
+}
+
+static enum handoff hdmi_20nm_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_20nm_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (is_gdsc_disabled(io))
+		return HANDOFF_DISABLED_CLK;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	io->handoff_resources = true;
+
+	if (hdmi_20nm_pll_lock_status(io)) {
+		io->pll_on = true;
+		c->rate = hdmi_20nm_vco_get_rate(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+	}
+
+	pr_debug("done, ret=%d\n", ret);
+	return ret;
+}
+
+static const struct clk_ops hdmi_20nm_vco_clk_ops = {
+	.enable = hdmi_20nm_vco_enable,
+	.set_rate = hdmi_20nm_vco_set_rate,
+	.get_rate = hdmi_20nm_vco_get_rate,
+	.round_rate = hdmi_20nm_vco_round_rate,
+	.prepare = hdmi_20nm_vco_prepare,
+	.unprepare = hdmi_20nm_vco_unprepare,
+	.handoff = hdmi_20nm_vco_handoff,
+};
+
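+/*
+ * Frequency-banded tuning tables: each {rate, value} pair supplies the
+ * register value for TMDS rates up to that ceiling, and the
+ * HDMI_PLL_TMDS_MAX entry terminates the table.
+ */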
+static struct hdmi_pll_vco_clk hdmi_20nm_vco_clk = {
+	.ip_seti = (struct hdmi_pll_cfg[]){
+		{550890000, 0x03},
+		{589240000, 0x07},
+		{689290000, 0x03},
+		{727600000, 0x07},
+		{HDMI_PLL_TMDS_MAX, 0x03},
+	},
+	.cp_seti = (struct hdmi_pll_cfg[]){
+		{34440000, 0x3F},
+		{36830000, 0x2F},
+		{68870000, 0x3F},
+		{73660000, 0x2F},
+		{137730000, 0x3F},
+		{147310000, 0x2F},
+		{275450000, 0x3F},
+		{294620000, 0x2F},
+		{344650000, 0x3F},
+		{363800000, 0x2F},
+		{477960000, 0x3F},
+		{512530000, 0x2F},
+		{550890000, 0x1F},
+		{589240000, 0x2F},
+		{630900000, 0x3F},
+		{650590000, 0x2F},
+		{689290000, 0x1F},
+		{727600000, 0x2F},
+		{HDMI_PLL_TMDS_MAX, 0x3F},
+	},
+	.ip_setp = (struct hdmi_pll_cfg[]){
+		{497340000, 0x03},
+		{512530000, 0x07},
+		{535680000, 0x03},
+		{550890000, 0x07},
+		{574060000, 0x03},
+		{727600000, 0x07},
+		{HDMI_PLL_TMDS_MAX, 0x03},
+	},
+	.cp_setp = (struct hdmi_pll_cfg[]){
+		{36830000, 0x1F},
+		{40010000, 0x17},
+		{73660000, 0x1F},
+		{80000000, 0x17},
+		{147310000, 0x1F},
+		{160010000, 0x17},
+		{294620000, 0x1F},
+		{363800000, 0x17},
+		{497340000, 0x0F},
+		{512530000, 0x1F},
+		{535680000, 0x0F},
+		{550890000, 0x1F},
+		{574060000, 0x0F},
+		{589240000, 0x1F},
+		{727600000, 0x17},
+		{HDMI_PLL_TMDS_MAX, 0x07},
+	},
+	.crctrl = (struct hdmi_pll_cfg[]){
+		{40010000, 0xBB},
+		{40030000, 0x77},
+		{80000000, 0xBB},
+		{80060000, 0x77},
+		{160010000, 0xBB},
+		{160120000, 0x77},
+		{772930000, 0xBB},
+		{HDMI_PLL_TMDS_MAX, 0xFF},
+	},
+	.c = {
+		.dbg_name = "hdmi_20nm_vco_clk",
+		.ops = &hdmi_20nm_vco_clk_ops,
+		CLK_INIT(hdmi_20nm_vco_clk.c),
+	},
+};
+
+static struct clk_lookup hdmipllcc_8994[] = {
+	CLK_LIST(hdmi_20nm_vco_clk),
+};
+
+int hdmi_20nm_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP;
+
+	/* Set client data for vco, mux and div clocks */
+	hdmi_20nm_vco_clk.priv = pll_res;
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8994,
+						 ARRAY_SIZE(hdmipllcc_8994));
+	if (rc) {
+		pr_err("Clock register failed rc=%d\n", rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		pr_debug("%s: SUCCESS\n", __func__);
+	}
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-28hpm.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-28hpm.c
new file mode 100644
index 0000000..6d3a7f5
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-hdmi-pll-28hpm.c
@@ -0,0 +1,1091 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+/* hdmi phy registers */
+#define HDMI_PHY_ANA_CFG0			(0x0000)
+#define HDMI_PHY_ANA_CFG1			(0x0004)
+#define HDMI_PHY_ANA_CFG2			(0x0008)
+#define HDMI_PHY_ANA_CFG3			(0x000C)
+#define HDMI_PHY_PD_CTRL0			(0x0010)
+#define HDMI_PHY_PD_CTRL1			(0x0014)
+#define HDMI_PHY_GLB_CFG			(0x0018)
+#define HDMI_PHY_DCC_CFG0			(0x001C)
+#define HDMI_PHY_DCC_CFG1			(0x0020)
+#define HDMI_PHY_TXCAL_CFG0			(0x0024)
+#define HDMI_PHY_TXCAL_CFG1			(0x0028)
+#define HDMI_PHY_TXCAL_CFG2			(0x002C)
+#define HDMI_PHY_TXCAL_CFG3			(0x0030)
+#define HDMI_PHY_BIST_CFG0			(0x0034)
+#define HDMI_PHY_BIST_CFG1			(0x0038)
+#define HDMI_PHY_BIST_PATN0			(0x003C)
+#define HDMI_PHY_BIST_PATN1			(0x0040)
+#define HDMI_PHY_BIST_PATN2			(0x0044)
+#define HDMI_PHY_BIST_PATN3			(0x0048)
+#define HDMI_PHY_STATUS				(0x005C)
+
+/* hdmi phy unified pll registers */
+#define HDMI_UNI_PLL_REFCLK_CFG			(0x0000)
+#define HDMI_UNI_PLL_POSTDIV1_CFG		(0x0004)
+#define HDMI_UNI_PLL_CHFPUMP_CFG		(0x0008)
+#define HDMI_UNI_PLL_VCOLPF_CFG			(0x000C)
+#define HDMI_UNI_PLL_VREG_CFG			(0x0010)
+#define HDMI_UNI_PLL_PWRGEN_CFG			(0x0014)
+#define HDMI_UNI_PLL_GLB_CFG			(0x0020)
+#define HDMI_UNI_PLL_POSTDIV2_CFG		(0x0024)
+#define HDMI_UNI_PLL_POSTDIV3_CFG		(0x0028)
+#define HDMI_UNI_PLL_LPFR_CFG			(0x002C)
+#define HDMI_UNI_PLL_LPFC1_CFG			(0x0030)
+#define HDMI_UNI_PLL_LPFC2_CFG			(0x0034)
+#define HDMI_UNI_PLL_SDM_CFG0			(0x0038)
+#define HDMI_UNI_PLL_SDM_CFG1			(0x003C)
+#define HDMI_UNI_PLL_SDM_CFG2			(0x0040)
+#define HDMI_UNI_PLL_SDM_CFG3			(0x0044)
+#define HDMI_UNI_PLL_SDM_CFG4			(0x0048)
+#define HDMI_UNI_PLL_SSC_CFG0			(0x004C)
+#define HDMI_UNI_PLL_SSC_CFG1			(0x0050)
+#define HDMI_UNI_PLL_SSC_CFG2			(0x0054)
+#define HDMI_UNI_PLL_SSC_CFG3			(0x0058)
+#define HDMI_UNI_PLL_LKDET_CFG0			(0x005C)
+#define HDMI_UNI_PLL_LKDET_CFG1			(0x0060)
+#define HDMI_UNI_PLL_LKDET_CFG2			(0x0064)
+#define HDMI_UNI_PLL_CAL_CFG0			(0x006C)
+#define HDMI_UNI_PLL_CAL_CFG1			(0x0070)
+#define HDMI_UNI_PLL_CAL_CFG2			(0x0074)
+#define HDMI_UNI_PLL_CAL_CFG3			(0x0078)
+#define HDMI_UNI_PLL_CAL_CFG4			(0x007C)
+#define HDMI_UNI_PLL_CAL_CFG5			(0x0080)
+#define HDMI_UNI_PLL_CAL_CFG6			(0x0084)
+#define HDMI_UNI_PLL_CAL_CFG7			(0x0088)
+#define HDMI_UNI_PLL_CAL_CFG8			(0x008C)
+#define HDMI_UNI_PLL_CAL_CFG9			(0x0090)
+#define HDMI_UNI_PLL_CAL_CFG10			(0x0094)
+#define HDMI_UNI_PLL_CAL_CFG11			(0x0098)
+#define HDMI_UNI_PLL_STATUS			(0x00C0)
+
+#define HDMI_PLL_POLL_DELAY_US			50
+#define HDMI_PLL_POLL_TIMEOUT_US		500
+
+static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
+static void hdmi_vco_disable(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
+
+	if (!hdmi_pll_res) {
+		pr_err("Invalid input parameter\n");
+		return;
+	}
+
+	if (!hdmi_pll_res->pll_on &&
+		mdss_pll_resource_enable(hdmi_pll_res, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return;
+	}
+
+	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0);
+	udelay(5);
+	MDSS_PLL_REG_W(hdmi_pll_res->phy_base, HDMI_PHY_GLB_CFG, 0x0);
+
+	hdmi_pll_res->handoff_resources = false;
+	mdss_pll_resource_enable(hdmi_pll_res, false);
+	hdmi_pll_res->pll_on = false;
+} /* hdmi_vco_disable */
+
+static int hdmi_vco_enable(struct clk *c)
+{
+	u32 status;
+	u32 delay_us, timeout_us;
+	int rc;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
+
+	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	/* Global Enable */
+	MDSS_PLL_REG_W(hdmi_pll_res->phy_base, HDMI_PHY_GLB_CFG, 0x81);
+	/* Power up power gen */
+	MDSS_PLL_REG_W(hdmi_pll_res->phy_base, HDMI_PHY_PD_CTRL0, 0x00);
+	udelay(350);
+
+	/* PLL Power-Up */
+	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+	udelay(5);
+	/* Power up PLL LDO */
+	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x03);
+	udelay(350);
+
+	/* PLL Power-Up */
+	MDSS_PLL_REG_W(hdmi_pll_res->pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+	udelay(350);
+
+	/* poll for PLL ready status */
+	delay_us = 100;
+	timeout_us = 2000;
+	if (readl_poll_timeout_atomic(
+		(hdmi_pll_res->pll_base + HDMI_UNI_PLL_STATUS),
+		status, ((status & BIT(0)) == 1), delay_us, timeout_us)) {
+		pr_err("hdmi phy pll status=%x failed to Lock\n", status);
+		hdmi_vco_disable(c);
+		mdss_pll_resource_enable(hdmi_pll_res, false);
+		return -EINVAL;
+	}
+	pr_debug("hdmi phy pll is locked\n");
+
+	udelay(350);
+	/* poll for PHY ready status */
+	delay_us = 100;
+	timeout_us = 2000;
+	if (readl_poll_timeout_atomic(
+		(hdmi_pll_res->phy_base + HDMI_PHY_STATUS),
+		status, ((status & BIT(0)) == 1), delay_us, timeout_us)) {
+		pr_err("hdmi phy status=%x failed to Lock\n", status);
+		hdmi_vco_disable(c);
+		mdss_pll_resource_enable(hdmi_pll_res, false);
+		return -EINVAL;
+	}
+	hdmi_pll_res->pll_on = true;
+	pr_debug("hdmi phy is locked\n");
+
+	return 0;
+} /* hdmi_vco_enable */
+
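+/*
+ * Derive the fractional-N feedback divider settings. With SDM mode on, the
+ * 19.2 MHz reference is doubled to 38.4 MHz and the divider ratio
+ * N = vco_freq / 38.4 MHz is carried in 10^7 fixed point (fb_div_n).
+ * dc_offset holds (integer part of N - 1) in the same fixed point, and the
+ * SDM seed is the fractional part of N scaled by 2^sdm_res. For example,
+ * vco_freq = 1080 MHz gives N = 28.125: SDM_CFG1 = 64 + 27 = 0x5B and
+ * seed = 0.125 * 2^16 = 8192 = 0x2000 (SDM_CFG3:SDM_CFG2), which matches
+ * the hard-coded 1080000000 entry in hdmi_vco_set_rate() below.
+ */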
+static void hdmi_phy_pll_calculator(u32 vco_freq,
+				struct mdss_pll_resources *hdmi_pll_res)
+{
+	u32 ref_clk             = 19200000;
+	u32 sdm_mode            = 1;
+	u32 ref_clk_multiplier  = sdm_mode == 1 ? 2 : 1;
+	u32 int_ref_clk_freq    = ref_clk * ref_clk_multiplier;
+	u32 fbclk_pre_div       = 1;
+	u32 ssc_mode            = 0;
+	u32 kvco                = 270;
+	u32 vdd                 = 95;
+	u32 ten_power_six       = 1000000;
+	u32 ssc_ds_ppm          = ssc_mode ? 5000 : 0;
+	u32 sdm_res             = 16;
+	u32 ssc_tri_step        = 32;
+	u32 ssc_freq            = 2;
+	u64 ssc_ds              = vco_freq * ssc_ds_ppm;
+	u32 div_in_freq         = vco_freq / fbclk_pre_div;
+	u64 dc_offset           = (div_in_freq / int_ref_clk_freq - 1) *
+					ten_power_six * 10;
+	u32 ssc_kdiv            = (int_ref_clk_freq / ssc_freq) -
+					ten_power_six;
+	u64 sdm_freq_seed;
+	u32 ssc_tri_inc;
+	u64 fb_div_n;
+	void __iomem		*pll_base = hdmi_pll_res->pll_base;
+	u32 val;
+
+	pr_debug("vco_freq = %u\n", vco_freq);
+
+	do_div(ssc_ds, (u64)ten_power_six);
+
+	fb_div_n = (u64)div_in_freq * (u64)ten_power_six * 10;
+	do_div(fb_div_n, int_ref_clk_freq);
+
+	sdm_freq_seed = ((fb_div_n - dc_offset - ten_power_six * 10) *
+				(1 << sdm_res)  * 10) + 5;
+	do_div(sdm_freq_seed, ((u64)ten_power_six * 100));
+
+	ssc_tri_inc = (u32)ssc_ds;
+	ssc_tri_inc = (ssc_tri_inc / int_ref_clk_freq) * (1 << 16) /
+			ssc_tri_step;
+
+	val = (ref_clk_multiplier == 2 ? 1 : 0) +
+		((fbclk_pre_div == 2 ? 1 : 0) * 16);
+	pr_debug("HDMI_UNI_PLL_REFCLK_CFG = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, val);
+
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CHFPUMP_CFG, 0x02);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_PWRGEN_CFG, 0x00);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+
+	do_div(dc_offset, (u64)ten_power_six * 10);
+	val = sdm_mode == 0 ? 64 + dc_offset : 0;
+	pr_debug("HDMI_UNI_PLL_SDM_CFG0 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, val);
+
+	val = 64 + dc_offset;
+	pr_debug("HDMI_UNI_PLL_SDM_CFG1 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, val);
+
+	val = sdm_freq_seed & 0xFF;
+	pr_debug("HDMI_UNI_PLL_SDM_CFG2 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, val);
+
+	val = (sdm_freq_seed >> 8) & 0xFF;
+	pr_debug("HDMI_UNI_PLL_SDM_CFG3 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, val);
+
+	val = (sdm_freq_seed >> 16) & 0xFF;
+	pr_debug("HDMI_UNI_PLL_SDM_CFG4 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, val);
+
+	val = (ssc_mode == 0 ? 128 : 0) + (ssc_kdiv / ten_power_six);
+	pr_debug("HDMI_UNI_PLL_SSC_CFG0 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG0, val);
+
+	val = ssc_tri_inc & 0xFF;
+	pr_debug("HDMI_UNI_PLL_SSC_CFG1 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG1, val);
+
+	val = (ssc_tri_inc >> 8) & 0xFF;
+	pr_debug("HDMI_UNI_PLL_SSC_CFG2 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG2, val);
+
+	pr_debug("HDMI_UNI_PLL_SSC_CFG3 = 0x%x\n", ssc_tri_step);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SSC_CFG3, ssc_tri_step);
+
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG0, 0x0A);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG1, 0x04);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG3, 0x00);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG4, 0x00);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG5, 0x00);
+
+	val = (kvco * vdd * 10000) / 6;
+	val += 500000;
+	val /= ten_power_six;
+	pr_debug("HDMI_UNI_PLL_CAL_CFG6 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG6, val & 0xFF);
+
+	val = (kvco * vdd * 10000) / 6;
+	val -= ten_power_six;
+	val /= ten_power_six;
+	val = (val >> 8) & 0xFF;
+	pr_debug("HDMI_UNI_PLL_CAL_CFG7 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG7, val);
+
+	val = (ref_clk * 5) / ten_power_six;
+	pr_debug("HDMI_UNI_PLL_CAL_CFG8 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, val);
+
+	val = ((ref_clk * 5) / ten_power_six) >> 8;
+	pr_debug("HDMI_UNI_PLL_CAL_CFG9 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, val);
+
+	vco_freq /= ten_power_six;
+	val = vco_freq & 0xFF;
+	pr_debug("HDMI_UNI_PLL_CAL_CFG10 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, val);
+
+	val = vco_freq >> 8;
+	pr_debug("HDMI_UNI_PLL_CAL_CFG11 = 0x%x\n", val);
+	MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, val);
+} /* hdmi_phy_pll_calculator */
+
+static int hdmi_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
+	void __iomem		*pll_base;
+	void __iomem		*phy_base;
+	unsigned int set_power_dwn = 0;
+	int rc;
+
+	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	if (hdmi_pll_res->pll_on) {
+		hdmi_vco_disable(c);
+		set_power_dwn = 1;
+	}
+
+	pll_base = hdmi_pll_res->pll_base;
+	phy_base = hdmi_pll_res->phy_base;
+
+	pr_debug("rate=%ld\n", rate);
+
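+	/*
+	 * The rates below are the TMDS clocks of standard display modes (see
+	 * the per-case comments) with hand-tuned register tables; any other
+	 * rate falls through to the settings calculator in the default case.
+	 */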
+	switch (rate) {
+	case 0:
+		break;
+
+	case 756000000:
+		/* 640x480p60 */
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x52);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xB0);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0xF4);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	case 810000000:
+		/* 576p50/576i50 case */
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x54);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x18);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x2A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x03);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	case 810900000:
+		/* 480p60/480i60 case */
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x54);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x66);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x1D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x2A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x03);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	case 650000000:
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x4F);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x55);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xED);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x8A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	case 742500000:
+		/*
+		 * 720p60/720p50/1080i60/1080i50
+		 * 1080p24/1080p30/1080p25 case
+		 */
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x52);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x56);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0xE6);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	case 1080000000:
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x5B);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x38);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	case 1342500000:
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x36);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x61);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xF6);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0x3E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x05);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x05);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x11);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	case 1485000000:
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_REFCLK_CFG, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VCOLPF_CFG, 0x19);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFR_CFG, 0x0E);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC1_CFG, 0x20);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LPFC2_CFG, 0x0D);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG0, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG1, 0x65);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG2, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG3, 0xAC);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_SDM_CFG4, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG0, 0x10);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG1, 0x1A);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_LKDET_CFG2, 0x05);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV2_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_POSTDIV3_CFG, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG2, 0x01);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG8, 0x60);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG9, 0x00);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG10, 0xCD);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_CAL_CFG11, 0x05);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x06);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x03);
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x02);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		udelay(200);
+		break;
+
+	default:
+		pr_debug("Use pll settings calculator for rate=%ld\n", rate);
+
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_GLB_CFG, 0x81);
+		hdmi_phy_pll_calculator(rate, hdmi_pll_res);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL0, 0x1F);
+		udelay(50);
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_GLB_CFG, 0x0F);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_PD_CTRL1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x10);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG0, 0xDB);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG1, 0x43);
+
+		if (rate < 825000000) {
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x01);
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x00);
+		} else if (rate >= 825000000 && rate < 1342500000) {
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x05);
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x03);
+		} else {
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG2, 0x06);
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_ANA_CFG3, 0x03);
+		}
+
+		MDSS_PLL_REG_W(pll_base, HDMI_UNI_PLL_VREG_CFG, 0x04);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG0, 0xD0);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_DCC_CFG1, 0x1A);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG1, 0x00);
+
+		if (rate < 825000000)
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x01);
+		else
+			MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG2, 0x00);
+
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_TXCAL_CFG3, 0x05);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN0, 0x62);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN1, 0x03);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN2, 0x69);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_PATN3, 0x02);
+
+		udelay(200);
+
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_CFG1, 0x00);
+		MDSS_PLL_REG_W(phy_base, HDMI_PHY_BIST_CFG0, 0x00);
+	}
+
+	/* Make sure writes complete before disabling iface clock */
+	mb();
+
+	mdss_pll_resource_enable(hdmi_pll_res, false);
+
+	if (set_power_dwn)
+		hdmi_vco_enable(c);
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+	return 0;
+} /* hdmi_vco_set_rate */
+
+static unsigned long hdmi_vco_get_rate(struct clk *c)
+{
+	unsigned long freq = 0;
+	int rc;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(hdmi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	freq = MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
+			HDMI_UNI_PLL_CAL_CFG11) << 8 |
+		MDSS_PLL_REG_R(hdmi_pll_res->pll_base, HDMI_UNI_PLL_CAL_CFG10);
+
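+	/*
+	 * CAL_CFG11:CAL_CFG10 hold the programmed VCO frequency in MHz.
+	 * Rates with a fractional MHz component are mapped back explicitly,
+	 * and 810 MHz is disambiguated through the SDM seed.
+	 */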
+	switch (freq) {
+	case 742:
+		freq = 742500000;
+		break;
+	case 810:
+		if (MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
+					HDMI_UNI_PLL_SDM_CFG3) == 0x18)
+			freq = 810000000;
+		else
+			freq = 810900000;
+		break;
+	case 1342:
+		freq = 1342500000;
+		break;
+	default:
+		freq *= 1000000;
+	}
+	mdss_pll_resource_enable(hdmi_pll_res, false);
+
+	return freq;
+}
+
+static long hdmi_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	pr_debug("rrate=%ld\n", rrate);
+
+	return rrate;
+}
+
+static int hdmi_vco_prepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	int ret = 0;
+
+	pr_debug("rate=%ld\n", vco->rate);
+
+	if (!vco->rate_set && vco->rate)
+		ret = hdmi_vco_set_rate(c, vco->rate);
+
+	return ret;
+}
+
+static void hdmi_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+
+	vco->rate_set = false;
+}
+
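+/*
+ * Returns nonzero when HDMI_PHY_STATUS reports lock (bit 0) within the
+ * poll window and zero on timeout; a failed resource vote returns the
+ * errno instead.
+ */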
+static int hdmi_pll_lock_status(struct mdss_pll_resources *hdmi_pll_res)
+{
+	u32 status;
+	int pll_locked = 0;
+	int rc;
+
+	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	/* poll for PLL ready status */
+	if (readl_poll_timeout_atomic(
+			(hdmi_pll_res->phy_base + HDMI_PHY_STATUS),
+			status, ((status & BIT(0)) == 1),
+			HDMI_PLL_POLL_DELAY_US,
+			HDMI_PLL_POLL_TIMEOUT_US)) {
+		pr_debug("HDMI PLL status=%x failed to Lock\n", status);
+		pll_locked = 0;
+	} else {
+		pll_locked = 1;
+	}
+	mdss_pll_resource_enable(hdmi_pll_res, false);
+
+	return pll_locked;
+}
+
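+/*
+ * Boot-time handoff: if the bootloader left the PLL locked, keep the
+ * resource votes and adopt the current rate so the clock framework treats
+ * the clock as already enabled.
+ */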
+static enum handoff hdmi_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *hdmi_pll_res = vco->priv;
+
+	if (is_gdsc_disabled(hdmi_pll_res))
+		return HANDOFF_DISABLED_CLK;
+
+	if (mdss_pll_resource_enable(hdmi_pll_res, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	hdmi_pll_res->handoff_resources = true;
+
+	if (hdmi_pll_lock_status(hdmi_pll_res)) {
+		hdmi_pll_res->pll_on = true;
+		c->rate = hdmi_vco_get_rate(c);
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		hdmi_pll_res->handoff_resources = false;
+		mdss_pll_resource_enable(hdmi_pll_res, false);
+	}
+
+	pr_debug("done, ret=%d\n", ret);
+	return ret;
+}
+
+static const struct clk_ops hdmi_vco_clk_ops = {
+	.enable = hdmi_vco_enable,
+	.set_rate = hdmi_vco_set_rate,
+	.get_rate = hdmi_vco_get_rate,
+	.round_rate = hdmi_vco_round_rate,
+	.prepare = hdmi_vco_prepare,
+	.unprepare = hdmi_vco_unprepare,
+	.disable = hdmi_vco_disable,
+	.handoff = hdmi_vco_handoff,
+};
+
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+	.min_rate = 600000000,
+	.max_rate = 1800000000,
+	.c = {
+		.dbg_name = "hdmi_vco_clk",
+		.ops = &hdmi_vco_clk_ops,
+		CLK_INIT(hdmi_vco_clk.c),
+	},
+};
+
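+/*
+ * Fixed post-divider chain: the VCO feeds /1, /2, /4 and /6 taps, one of
+ * which is selected by hdmipll_mux_clk and then divided by 5 in
+ * hdmipll_clk_src to form the extp_clk_src output consumed by the MMSS
+ * clock controller.
+ */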
+struct div_clk hdmipll_div1_clk = {
+	.data = {
+		.div = 1,
+		.min_div = 1,
+		.max_div = 1,
+	},
+	.c = {
+		.parent = &hdmi_vco_clk.c,
+		.dbg_name = "hdmipll_div1_clk",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(hdmipll_div1_clk.c),
+	},
+};
+
+struct div_clk hdmipll_div2_clk = {
+	.data = {
+		.div = 2,
+		.min_div = 2,
+		.max_div = 2,
+	},
+	.c = {
+		.parent = &hdmi_vco_clk.c,
+		.dbg_name = "hdmipll_div2_clk",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(hdmipll_div2_clk.c),
+	},
+};
+
+struct div_clk hdmipll_div4_clk = {
+	.data = {
+		.div = 4,
+		.min_div = 4,
+		.max_div = 4,
+	},
+	.c = {
+		.parent = &hdmi_vco_clk.c,
+		.dbg_name = "hdmipll_div4_clk",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(hdmipll_div4_clk.c),
+	},
+};
+
+struct div_clk hdmipll_div6_clk = {
+	.data = {
+		.div = 6,
+		.min_div = 6,
+		.max_div = 6,
+	},
+	.c = {
+		.parent = &hdmi_vco_clk.c,
+		.dbg_name = "hdmipll_div6_clk",
+		.ops = &clk_ops_div,
+		.flags = CLKFLAG_NO_RATE_CACHE,
+		CLK_INIT(hdmipll_div6_clk.c),
+	},
+};
+
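+/*
+ * The divider tap is selected through the low two bits of
+ * HDMI_UNI_PLL_POSTDIV1_CFG; the mux ops below simply mirror that field.
+ */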
+static int hdmipll_set_mux_sel(struct mux_clk *clk, int mux_sel)
+{
+	struct mdss_pll_resources *hdmi_pll_res = clk->priv;
+	int rc;
+
+	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	pr_debug("mux_sel=%d\n", mux_sel);
+	MDSS_PLL_REG_W(hdmi_pll_res->pll_base,
+				HDMI_UNI_PLL_POSTDIV1_CFG, mux_sel);
+	mdss_pll_resource_enable(hdmi_pll_res, false);
+
+	return 0;
+}
+
+static int hdmipll_get_mux_sel(struct mux_clk *clk)
+{
+	int rc;
+	int mux_sel = 0;
+	struct mdss_pll_resources *hdmi_pll_res = clk->priv;
+
+	if (is_gdsc_disabled(hdmi_pll_res))
+		return 0;
+
+	rc = mdss_pll_resource_enable(hdmi_pll_res, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	mux_sel = MDSS_PLL_REG_R(hdmi_pll_res->pll_base,
+				HDMI_UNI_PLL_POSTDIV1_CFG);
+	mdss_pll_resource_enable(hdmi_pll_res, false);
+	mux_sel &= 0x03;
+	pr_debug("mux_sel=%d\n", mux_sel);
+
+	return mux_sel;
+}
+
+static struct clk_mux_ops hdmipll_mux_ops = {
+	.set_mux_sel = hdmipll_set_mux_sel,
+	.get_mux_sel = hdmipll_get_mux_sel,
+};
+
+static struct clk_ops hdmi_mux_ops;
+
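+/*
+ * clk_ops_gen_mux is reused wholesale except for prepare, which re-applies
+ * the cached rate so the divider selection is reprogrammed whenever the
+ * clock is brought up.
+ */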
+static int hdmi_mux_prepare(struct clk *c)
+{
+	int ret = 0;
+
+	if (c && c->ops && c->ops->set_rate)
+		ret = c->ops->set_rate(c, c->rate);
+
+	return ret;
+}
+
+struct mux_clk hdmipll_mux_clk = {
+	MUX_SRC_LIST(
+		{ &hdmipll_div1_clk.c, 0 },
+		{ &hdmipll_div2_clk.c, 1 },
+		{ &hdmipll_div4_clk.c, 2 },
+		{ &hdmipll_div6_clk.c, 3 },
+	),
+	.ops = &hdmipll_mux_ops,
+	.c = {
+		.parent = &hdmipll_div1_clk.c,
+		.dbg_name = "hdmipll_mux_clk",
+		.ops = &hdmi_mux_ops,
+		CLK_INIT(hdmipll_mux_clk.c),
+	},
+};
+
+struct div_clk hdmipll_clk_src = {
+	.data = {
+		.div = 5,
+		.min_div = 5,
+		.max_div = 5,
+	},
+	.c = {
+		.parent = &hdmipll_mux_clk.c,
+		.dbg_name = "hdmipll_clk_src",
+		.ops = &clk_ops_div,
+		CLK_INIT(hdmipll_clk_src.c),
+	},
+};
+
+static struct clk_lookup hdmipllcc_8974[] = {
+	CLK_LOOKUP("extp_clk_src", hdmipll_clk_src.c,
+						"fd8c0000.qcom,mmsscc-mdss"),
+};
+
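+/*
+ * Attach the shared PLL resources to every clock node and register the
+ * tree; a registration failure is reported as -EPROBE_DEFER so the probe
+ * is retried once the clock dependencies are available.
+ */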
+int hdmi_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = -ENOTSUPP;
+
+	/* Set client data for vco, mux and div clocks */
+	hdmipll_clk_src.priv = pll_res;
+	hdmipll_mux_clk.priv = pll_res;
+	hdmipll_div1_clk.priv = pll_res;
+	hdmipll_div2_clk.priv = pll_res;
+	hdmipll_div4_clk.priv = pll_res;
+	hdmipll_div6_clk.priv = pll_res;
+	hdmi_vco_clk.priv = pll_res;
+
+	/* Set hdmi mux clock operation */
+	hdmi_mux_ops = clk_ops_gen_mux;
+	hdmi_mux_ops.prepare = hdmi_mux_prepare;
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8974,
+						 ARRAY_SIZE(hdmipllcc_8974));
+	if (rc) {
+		pr_err("Clock register failed rc=%d\n", rc);
+		rc = -EPROBE_DEFER;
+	}
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-8996.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-8996.c
new file mode 100644
index 0000000..951970a
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-hdmi-pll-8996.c
@@ -0,0 +1,2675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8996.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+/* CONSTANTS */
+#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO            10
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD         3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD          1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD          750000000
+#define HDMI_CLKS_PLL_DIVSEL                     0
+#define HDMI_CORECLK_DIV                         5
+#define HDMI_REF_CLOCK                           19200000
+#define HDMI_64B_ERR_VAL                         0xFFFFFFFFFFFFFFFF
+#define HDMI_VERSION_8996_V1                     1
+#define HDMI_VERSION_8996_V2                     2
+#define HDMI_VERSION_8996_V3                     3
+#define HDMI_VERSION_8996_V3_1_8                 4
+
+#define HDMI_VCO_MAX_FREQ                        12000000000
+#define HDMI_VCO_MIN_FREQ                        8000000000
+#define HDMI_2400MHZ_BIT_CLK_HZ                  2400000000UL
+#define HDMI_2250MHZ_BIT_CLK_HZ                  2250000000UL
+#define HDMI_2000MHZ_BIT_CLK_HZ                  2000000000UL
+#define HDMI_1700MHZ_BIT_CLK_HZ                  1700000000UL
+#define HDMI_1200MHZ_BIT_CLK_HZ                  1200000000UL
+#define HDMI_1334MHZ_BIT_CLK_HZ                  1334000000UL
+#define HDMI_1000MHZ_BIT_CLK_HZ                  1000000000UL
+#define HDMI_850MHZ_BIT_CLK_HZ                   850000000
+#define HDMI_667MHZ_BIT_CLK_HZ                   667000000
+#define HDMI_600MHZ_BIT_CLK_HZ                   600000000
+#define HDMI_500MHZ_BIT_CLK_HZ                   500000000
+#define HDMI_450MHZ_BIT_CLK_HZ                   450000000
+#define HDMI_334MHZ_BIT_CLK_HZ                   334000000
+#define HDMI_300MHZ_BIT_CLK_HZ                   300000000
+#define HDMI_282MHZ_BIT_CLK_HZ                   282000000
+#define HDMI_250MHZ_BIT_CLK_HZ                   250000000
+#define HDMI_KHZ_TO_HZ                           1000
+
+/* PLL REGISTERS */
+#define QSERDES_COM_ATB_SEL1                     (0x000)
+#define QSERDES_COM_ATB_SEL2                     (0x004)
+#define QSERDES_COM_FREQ_UPDATE                  (0x008)
+#define QSERDES_COM_BG_TIMER                     (0x00C)
+#define QSERDES_COM_SSC_EN_CENTER                (0x010)
+#define QSERDES_COM_SSC_ADJ_PER1                 (0x014)
+#define QSERDES_COM_SSC_ADJ_PER2                 (0x018)
+#define QSERDES_COM_SSC_PER1                     (0x01C)
+#define QSERDES_COM_SSC_PER2                     (0x020)
+#define QSERDES_COM_SSC_STEP_SIZE1               (0x024)
+#define QSERDES_COM_SSC_STEP_SIZE2               (0x028)
+#define QSERDES_COM_POST_DIV                     (0x02C)
+#define QSERDES_COM_POST_DIV_MUX                 (0x030)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN          (0x034)
+#define QSERDES_COM_CLK_ENABLE1                  (0x038)
+#define QSERDES_COM_SYS_CLK_CTRL                 (0x03C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE            (0x040)
+#define QSERDES_COM_PLL_EN                       (0x044)
+#define QSERDES_COM_PLL_IVCO                     (0x048)
+#define QSERDES_COM_LOCK_CMP1_MODE0              (0x04C)
+#define QSERDES_COM_LOCK_CMP2_MODE0              (0x050)
+#define QSERDES_COM_LOCK_CMP3_MODE0              (0x054)
+#define QSERDES_COM_LOCK_CMP1_MODE1              (0x058)
+#define QSERDES_COM_LOCK_CMP2_MODE1              (0x05C)
+#define QSERDES_COM_LOCK_CMP3_MODE1              (0x060)
+#define QSERDES_COM_LOCK_CMP1_MODE2              (0x064)
+#define QSERDES_COM_CMN_RSVD0                    (0x064)
+#define QSERDES_COM_LOCK_CMP2_MODE2              (0x068)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTRL         (0x068)
+#define QSERDES_COM_LOCK_CMP3_MODE2              (0x06C)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS       (0x06C)
+#define QSERDES_COM_BG_TRIM                      (0x070)
+#define QSERDES_COM_CLK_EP_DIV                   (0x074)
+#define QSERDES_COM_CP_CTRL_MODE0                (0x078)
+#define QSERDES_COM_CP_CTRL_MODE1                (0x07C)
+#define QSERDES_COM_CP_CTRL_MODE2                (0x080)
+#define QSERDES_COM_CMN_RSVD1                    (0x080)
+#define QSERDES_COM_PLL_RCTRL_MODE0              (0x084)
+#define QSERDES_COM_PLL_RCTRL_MODE1              (0x088)
+#define QSERDES_COM_PLL_RCTRL_MODE2              (0x08C)
+#define QSERDES_COM_CMN_RSVD2                    (0x08C)
+#define QSERDES_COM_PLL_CCTRL_MODE0              (0x090)
+#define QSERDES_COM_PLL_CCTRL_MODE1              (0x094)
+#define QSERDES_COM_PLL_CCTRL_MODE2              (0x098)
+#define QSERDES_COM_CMN_RSVD3                    (0x098)
+#define QSERDES_COM_PLL_CNTRL                    (0x09C)
+#define QSERDES_COM_PHASE_SEL_CTRL               (0x0A0)
+#define QSERDES_COM_PHASE_SEL_DC                 (0x0A4)
+#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL         (0x0A8)
+#define QSERDES_COM_BIAS_EN_CTRL_BY_PSM          (0x0A8)
+#define QSERDES_COM_SYSCLK_EN_SEL                (0x0AC)
+#define QSERDES_COM_CML_SYSCLK_SEL               (0x0B0)
+#define QSERDES_COM_RESETSM_CNTRL                (0x0B4)
+#define QSERDES_COM_RESETSM_CNTRL2               (0x0B8)
+#define QSERDES_COM_RESTRIM_CTRL                 (0x0BC)
+#define QSERDES_COM_RESTRIM_CTRL2                (0x0C0)
+#define QSERDES_COM_RESCODE_DIV_NUM              (0x0C4)
+#define QSERDES_COM_LOCK_CMP_EN                  (0x0C8)
+#define QSERDES_COM_LOCK_CMP_CFG                 (0x0CC)
+#define QSERDES_COM_DEC_START_MODE0              (0x0D0)
+#define QSERDES_COM_DEC_START_MODE1              (0x0D4)
+#define QSERDES_COM_DEC_START_MODE2              (0x0D8)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL          (0x0D8)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0        (0x0DC)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0        (0x0E0)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0        (0x0E4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1        (0x0E8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1        (0x0EC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1        (0x0F0)
+#define QSERDES_COM_DIV_FRAC_START1_MODE2        (0x0F4)
+#define QSERDES_COM_VCO_TUNE_MINVAL1             (0x0F4)
+#define QSERDES_COM_DIV_FRAC_START2_MODE2        (0x0F8)
+#define QSERDES_COM_VCO_TUNE_MINVAL2             (0x0F8)
+#define QSERDES_COM_DIV_FRAC_START3_MODE2        (0x0FC)
+#define QSERDES_COM_CMN_RSVD4                    (0x0FC)
+#define QSERDES_COM_INTEGLOOP_INITVAL            (0x100)
+#define QSERDES_COM_INTEGLOOP_EN                 (0x104)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0        (0x108)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0        (0x10C)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1        (0x110)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1        (0x114)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE2        (0x118)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1             (0x118)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE2        (0x11C)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2             (0x11C)
+#define QSERDES_COM_RES_TRIM_CONTROL2            (0x120)
+#define QSERDES_COM_VCO_TUNE_CTRL                (0x124)
+#define QSERDES_COM_VCO_TUNE_MAP                 (0x128)
+#define QSERDES_COM_VCO_TUNE1_MODE0              (0x12C)
+#define QSERDES_COM_VCO_TUNE2_MODE0              (0x130)
+#define QSERDES_COM_VCO_TUNE1_MODE1              (0x134)
+#define QSERDES_COM_VCO_TUNE2_MODE1              (0x138)
+#define QSERDES_COM_VCO_TUNE1_MODE2              (0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL1            (0x13C)
+#define QSERDES_COM_VCO_TUNE2_MODE2              (0x140)
+#define QSERDES_COM_VCO_TUNE_INITVAL2            (0x140)
+#define QSERDES_COM_VCO_TUNE_TIMER1              (0x144)
+#define QSERDES_COM_VCO_TUNE_TIMER2              (0x148)
+#define QSERDES_COM_SAR                          (0x14C)
+#define QSERDES_COM_SAR_CLK                      (0x150)
+#define QSERDES_COM_SAR_CODE_OUT_STATUS          (0x154)
+#define QSERDES_COM_SAR_CODE_READY_STATUS        (0x158)
+#define QSERDES_COM_CMN_STATUS                   (0x15C)
+#define QSERDES_COM_RESET_SM_STATUS              (0x160)
+#define QSERDES_COM_RESTRIM_CODE_STATUS          (0x164)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS          (0x168)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS          (0x16C)
+#define QSERDES_COM_BG_CTRL                      (0x170)
+#define QSERDES_COM_CLK_SELECT                   (0x174)
+#define QSERDES_COM_HSCLK_SEL                    (0x178)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS     (0x17C)
+#define QSERDES_COM_PLL_ANALOG                   (0x180)
+#define QSERDES_COM_CORECLK_DIV                  (0x184)
+#define QSERDES_COM_SW_RESET                     (0x188)
+#define QSERDES_COM_CORE_CLK_EN                  (0x18C)
+#define QSERDES_COM_C_READY_STATUS               (0x190)
+#define QSERDES_COM_CMN_CONFIG                   (0x194)
+#define QSERDES_COM_CMN_RATE_OVERRIDE            (0x198)
+#define QSERDES_COM_SVS_MODE_CLK_SEL             (0x19C)
+#define QSERDES_COM_DEBUG_BUS0                   (0x1A0)
+#define QSERDES_COM_DEBUG_BUS1                   (0x1A4)
+#define QSERDES_COM_DEBUG_BUS2                   (0x1A8)
+#define QSERDES_COM_DEBUG_BUS3                   (0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL                (0x1B0)
+#define QSERDES_COM_CMN_MISC1                    (0x1B4)
+#define QSERDES_COM_CMN_MISC2                    (0x1B8)
+#define QSERDES_COM_CORECLK_DIV_MODE1            (0x1BC)
+#define QSERDES_COM_CORECLK_DIV_MODE2            (0x1C0)
+#define QSERDES_COM_CMN_RSVD5                    (0x1C0)
+
+/* Tx Channel base addresses */
+#define HDMI_TX_L0_BASE_OFFSET                   (0x400)
+#define HDMI_TX_L1_BASE_OFFSET                   (0x600)
+#define HDMI_TX_L2_BASE_OFFSET                   (0x800)
+#define HDMI_TX_L3_BASE_OFFSET                   (0xA00)
+
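+/*
+ * Per-lane registers live at lane base + offset: the four TX lanes are
+ * spaced 0x200 apart and the QSERDES_TX_L0_* offsets below apply to each
+ * lane.
+ */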
+/* Tx Channel PHY registers */
+#define QSERDES_TX_L0_BIST_MODE_LANENO                    (0x000)
+#define QSERDES_TX_L0_BIST_INVERT                         (0x004)
+#define QSERDES_TX_L0_CLKBUF_ENABLE                       (0x008)
+#define QSERDES_TX_L0_CMN_CONTROL_ONE                     (0x00C)
+#define QSERDES_TX_L0_CMN_CONTROL_TWO                     (0x010)
+#define QSERDES_TX_L0_CMN_CONTROL_THREE                   (0x014)
+#define QSERDES_TX_L0_TX_EMP_POST1_LVL                    (0x018)
+#define QSERDES_TX_L0_TX_POST2_EMPH                       (0x01C)
+#define QSERDES_TX_L0_TX_BOOST_LVL_UP_DN                  (0x020)
+#define QSERDES_TX_L0_HP_PD_ENABLES                       (0x024)
+#define QSERDES_TX_L0_TX_IDLE_LVL_LARGE_AMP               (0x028)
+#define QSERDES_TX_L0_TX_DRV_LVL                          (0x02C)
+#define QSERDES_TX_L0_TX_DRV_LVL_OFFSET                   (0x030)
+#define QSERDES_TX_L0_RESET_TSYNC_EN                      (0x034)
+#define QSERDES_TX_L0_PRE_STALL_LDO_BOOST_EN              (0x038)
+#define QSERDES_TX_L0_TX_BAND                             (0x03C)
+#define QSERDES_TX_L0_SLEW_CNTL                           (0x040)
+#define QSERDES_TX_L0_INTERFACE_SELECT                    (0x044)
+#define QSERDES_TX_L0_LPB_EN                              (0x048)
+#define QSERDES_TX_L0_RES_CODE_LANE_TX                    (0x04C)
+#define QSERDES_TX_L0_RES_CODE_LANE_RX                    (0x050)
+#define QSERDES_TX_L0_RES_CODE_LANE_OFFSET                (0x054)
+#define QSERDES_TX_L0_PERL_LENGTH1                        (0x058)
+#define QSERDES_TX_L0_PERL_LENGTH2                        (0x05C)
+#define QSERDES_TX_L0_SERDES_BYP_EN_OUT                   (0x060)
+#define QSERDES_TX_L0_DEBUG_BUS_SEL                       (0x064)
+#define QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN    (0x068)
+#define QSERDES_TX_L0_TX_POL_INV                          (0x06C)
+#define QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN          (0x070)
+#define QSERDES_TX_L0_BIST_PATTERN1                       (0x074)
+#define QSERDES_TX_L0_BIST_PATTERN2                       (0x078)
+#define QSERDES_TX_L0_BIST_PATTERN3                       (0x07C)
+#define QSERDES_TX_L0_BIST_PATTERN4                       (0x080)
+#define QSERDES_TX_L0_BIST_PATTERN5                       (0x084)
+#define QSERDES_TX_L0_BIST_PATTERN6                       (0x088)
+#define QSERDES_TX_L0_BIST_PATTERN7                       (0x08C)
+#define QSERDES_TX_L0_BIST_PATTERN8                       (0x090)
+#define QSERDES_TX_L0_LANE_MODE                           (0x094)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE                  (0x098)
+#define QSERDES_TX_L0_IDAC_CAL_LANE_MODE_CONFIGURATION    (0x09C)
+#define QSERDES_TX_L0_ATB_SEL1                            (0x0A0)
+#define QSERDES_TX_L0_ATB_SEL2                            (0x0A4)
+#define QSERDES_TX_L0_RCV_DETECT_LVL                      (0x0A8)
+#define QSERDES_TX_L0_RCV_DETECT_LVL_2                    (0x0AC)
+#define QSERDES_TX_L0_PRBS_SEED1                          (0x0B0)
+#define QSERDES_TX_L0_PRBS_SEED2                          (0x0B4)
+#define QSERDES_TX_L0_PRBS_SEED3                          (0x0B8)
+#define QSERDES_TX_L0_PRBS_SEED4                          (0x0BC)
+#define QSERDES_TX_L0_RESET_GEN                           (0x0C0)
+#define QSERDES_TX_L0_RESET_GEN_MUXES                     (0x0C4)
+#define QSERDES_TX_L0_TRAN_DRVR_EMP_EN                    (0x0C8)
+#define QSERDES_TX_L0_TX_INTERFACE_MODE                   (0x0CC)
+#define QSERDES_TX_L0_PWM_CTRL                            (0x0D0)
+#define QSERDES_TX_L0_PWM_ENCODED_OR_DATA                 (0x0D4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND2            (0x0D8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND2            (0x0DC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND2            (0x0E0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND2            (0x0E4)
+#define QSERDES_TX_L0_PWM_GEAR_1_DIVIDER_BAND0_1          (0x0E8)
+#define QSERDES_TX_L0_PWM_GEAR_2_DIVIDER_BAND0_1          (0x0EC)
+#define QSERDES_TX_L0_PWM_GEAR_3_DIVIDER_BAND0_1          (0x0F0)
+#define QSERDES_TX_L0_PWM_GEAR_4_DIVIDER_BAND0_1          (0x0F4)
+#define QSERDES_TX_L0_VMODE_CTRL1                         (0x0F8)
+#define QSERDES_TX_L0_VMODE_CTRL2                         (0x0FC)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV_CNTL              (0x100)
+#define QSERDES_TX_L0_BIST_STATUS                         (0x104)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT1                   (0x108)
+#define QSERDES_TX_L0_BIST_ERROR_COUNT2                   (0x10C)
+#define QSERDES_TX_L0_TX_ALOG_INTF_OBSV                   (0x110)
+
+/* HDMI PHY REGISTERS */
+#define HDMI_PHY_BASE_OFFSET                  (0xC00)
+
+#define HDMI_PHY_CFG                          (0x00)
+#define HDMI_PHY_PD_CTL                       (0x04)
+#define HDMI_PHY_MODE                         (0x08)
+#define HDMI_PHY_MISR_CLEAR                   (0x0C)
+#define HDMI_PHY_TX0_TX1_BIST_CFG0            (0x10)
+#define HDMI_PHY_TX0_TX1_BIST_CFG1            (0x14)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE0      (0x18)
+#define HDMI_PHY_TX0_TX1_PRBS_SEED_BYTE1      (0x1C)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN0        (0x20)
+#define HDMI_PHY_TX0_TX1_BIST_PATTERN1        (0x24)
+#define HDMI_PHY_TX2_TX3_BIST_CFG0            (0x28)
+#define HDMI_PHY_TX2_TX3_BIST_CFG1            (0x2C)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE0      (0x30)
+#define HDMI_PHY_TX2_TX3_PRBS_SEED_BYTE1      (0x34)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN0        (0x38)
+#define HDMI_PHY_TX2_TX3_BIST_PATTERN1        (0x3C)
+#define HDMI_PHY_DEBUG_BUS_SEL                (0x40)
+#define HDMI_PHY_TXCAL_CFG0                   (0x44)
+#define HDMI_PHY_TXCAL_CFG1                   (0x48)
+#define HDMI_PHY_TX0_TX1_LANE_CTL             (0x4C)
+#define HDMI_PHY_TX2_TX3_LANE_CTL             (0x50)
+#define HDMI_PHY_LANE_BIST_CONFIG             (0x54)
+#define HDMI_PHY_CLOCK                        (0x58)
+#define HDMI_PHY_MISC1                        (0x5C)
+#define HDMI_PHY_MISC2                        (0x60)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS0         (0x64)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS1         (0x68)
+#define HDMI_PHY_TX0_TX1_BIST_STATUS2         (0x6C)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS0         (0x70)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS1         (0x74)
+#define HDMI_PHY_TX2_TX3_BIST_STATUS2         (0x78)
+#define HDMI_PHY_PRE_MISR_STATUS0             (0x7C)
+#define HDMI_PHY_PRE_MISR_STATUS1             (0x80)
+#define HDMI_PHY_PRE_MISR_STATUS2             (0x84)
+#define HDMI_PHY_PRE_MISR_STATUS3             (0x88)
+#define HDMI_PHY_POST_MISR_STATUS0            (0x8C)
+#define HDMI_PHY_POST_MISR_STATUS1            (0x90)
+#define HDMI_PHY_POST_MISR_STATUS2            (0x94)
+#define HDMI_PHY_POST_MISR_STATUS3            (0x98)
+#define HDMI_PHY_STATUS                       (0x9C)
+#define HDMI_PHY_MISC3_STATUS                 (0xA0)
+#define HDMI_PHY_MISC4_STATUS                 (0xA4)
+#define HDMI_PHY_DEBUG_BUS0                   (0xA8)
+#define HDMI_PHY_DEBUG_BUS1                   (0xAC)
+#define HDMI_PHY_DEBUG_BUS2                   (0xB0)
+#define HDMI_PHY_DEBUG_BUS3                   (0xB4)
+#define HDMI_PHY_PHY_REVISION_ID0             (0xB8)
+#define HDMI_PHY_PHY_REVISION_ID1             (0xBC)
+#define HDMI_PHY_PHY_REVISION_ID2             (0xC0)
+#define HDMI_PHY_PHY_REVISION_ID3             (0xC4)
+
+#define HDMI_PLL_POLL_MAX_READS                100
+#define HDMI_PLL_POLL_TIMEOUT_US               1500
+
+enum hdmi_pll_freqs {
+	HDMI_PCLK_25200_KHZ,
+	HDMI_PCLK_27027_KHZ,
+	HDMI_PCLK_27000_KHZ,
+	HDMI_PCLK_74250_KHZ,
+	HDMI_PCLK_148500_KHZ,
+	HDMI_PCLK_154000_KHZ,
+	HDMI_PCLK_268500_KHZ,
+	HDMI_PCLK_297000_KHZ,
+	HDMI_PCLK_594000_KHZ,
+	HDMI_PCLK_MAX
+};
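+
+/*
+ * These entries correspond to common HDMI pixel clocks, e.g. 25.2 MHz
+ * (VGA), 74.25 MHz (720p), 148.5 MHz (1080p60), 297 MHz (2160p30) and
+ * 594 MHz (2160p60).
+ */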
+
+struct hdmi_8996_phy_pll_reg_cfg {
+	u32 tx_l0_lane_mode;
+	u32 tx_l2_lane_mode;
+	u32 tx_l0_tx_band;
+	u32 tx_l1_tx_band;
+	u32 tx_l2_tx_band;
+	u32 tx_l3_tx_band;
+	u32 com_svs_mode_clk_sel;
+	u32 com_hsclk_sel;
+	u32 com_pll_cctrl_mode0;
+	u32 com_pll_rctrl_mode0;
+	u32 com_cp_ctrl_mode0;
+	u32 com_dec_start_mode0;
+	u32 com_div_frac_start1_mode0;
+	u32 com_div_frac_start2_mode0;
+	u32 com_div_frac_start3_mode0;
+	u32 com_integloop_gain0_mode0;
+	u32 com_integloop_gain1_mode0;
+	u32 com_lock_cmp_en;
+	u32 com_lock_cmp1_mode0;
+	u32 com_lock_cmp2_mode0;
+	u32 com_lock_cmp3_mode0;
+	u32 com_core_clk_en;
+	u32 com_coreclk_div;
+	u32 com_restrim_ctrl;
+	u32 com_vco_tune_ctrl;
+
+	u32 tx_l0_tx_drv_lvl;
+	u32 tx_l0_tx_emp_post1_lvl;
+	u32 tx_l1_tx_drv_lvl;
+	u32 tx_l1_tx_emp_post1_lvl;
+	u32 tx_l2_tx_drv_lvl;
+	u32 tx_l2_tx_emp_post1_lvl;
+	u32 tx_l3_tx_drv_lvl;
+	u32 tx_l3_tx_emp_post1_lvl;
+	u32 tx_l0_vmode_ctrl1;
+	u32 tx_l0_vmode_ctrl2;
+	u32 tx_l1_vmode_ctrl1;
+	u32 tx_l1_vmode_ctrl2;
+	u32 tx_l2_vmode_ctrl1;
+	u32 tx_l2_vmode_ctrl2;
+	u32 tx_l3_vmode_ctrl1;
+	u32 tx_l3_vmode_ctrl2;
+	u32 tx_l0_res_code_lane_tx;
+	u32 tx_l1_res_code_lane_tx;
+	u32 tx_l2_res_code_lane_tx;
+	u32 tx_l3_res_code_lane_tx;
+
+	u32 phy_mode;
+};
+
+struct hdmi_8996_v3_post_divider {
+	u64 vco_freq;
+	u64 hsclk_divsel;
+	u64 vco_ratio;
+	u64 tx_band_sel;
+	u64 half_rate_mode;
+};
+
+static inline struct hdmi_pll_vco_clk *to_hdmi_8996_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
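+/*
+ * Post-divider ladder for bit clocks below 2.4 GHz. Together with the
+ * TX band divider chosen alongside it, this appears intended to keep
+ * the resulting VCO frequency within its supported range, which is why
+ * the value alternates between 3 and 4 as the bit clock halves.
+ */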
+static inline u64 hdmi_8996_v1_get_post_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_1700MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_1200MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_850MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_600MHZ_BIT_CLK_HZ)
+		return 4;
+	else if (bclk >= HDMI_450MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_300MHZ_BIT_CLK_HZ)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
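+/*
+ * v2 expresses the same ladder relative to vco_range: the break points
+ * sit at 2/3, 1/2, 1/3, 1/4, 1/6 and 1/8 of vco_range instead of at
+ * fixed frequencies.
+ */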
+static inline u64 hdmi_8996_v2_get_post_div_lt_2g(u64 bclk, u64 vco_range)
+{
+	u64 hdmi_8ghz = vco_range;
+	u64 tmp_calc;
+
+	hdmi_8ghz <<= 2;
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 6U);
+
+	if (bclk >= vco_range)
+		return 2;
+	else if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 1)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 12U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 2)
+		return 4;
+
+	tmp_calc = hdmi_8ghz;
+	do_div(tmp_calc, 24U);
+	if (bclk >= tmp_calc)
+		return 3;
+	else if (bclk >= vco_range >> 3)
+		return 4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v2_get_post_div_gt_2g(u64 hsclk)
+{
+	/* hsclk is unsigned, so only the upper bound needs checking */
+	if (hsclk <= 3)
+		return hsclk + 1;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_get_coreclk_div_lt_2g(u64 bclk)
+{
+	if (bclk >= HDMI_1334MHZ_BIT_CLK_HZ)
+		return 1;
+	else if (bclk >= HDMI_1000MHZ_BIT_CLK_HZ)
+		return 1;
+	else if (bclk >= HDMI_667MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_500MHZ_BIT_CLK_HZ)
+		return 2;
+	else if (bclk >= HDMI_334MHZ_BIT_CLK_HZ)
+		return 3;
+	else if (bclk >= HDMI_250MHZ_BIT_CLK_HZ)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_get_coreclk_div_ratio(u64 clks_pll_divsel,
+						  u64 coreclk_div)
+{
+	if (clks_pll_divsel == 0)
+		return coreclk_div*2;
+	else if (clks_pll_divsel == 1)
+		return coreclk_div*4;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v1_get_tx_band(u64 bclk)
+{
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ)
+		return 0;
+	if (bclk >= HDMI_1200MHZ_BIT_CLK_HZ)
+		return 1;
+	if (bclk >= HDMI_600MHZ_BIT_CLK_HZ)
+		return 2;
+	if (bclk >= HDMI_300MHZ_BIT_CLK_HZ)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v2_get_tx_band(u64 bclk, u64 vco_range)
+{
+	if (bclk >= vco_range)
+		return 0;
+	else if (bclk >= vco_range >> 1)
+		return 1;
+	else if (bclk >= vco_range >> 2)
+		return 2;
+	else if (bclk >= vco_range >> 3)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v1_get_hsclk(u64 fdata)
+{
+	if (fdata >= 9600000000UL)
+		return 0;
+	else if (fdata >= 4800000000UL)
+		return 1;
+	else if (fdata >= 3200000000UL)
+		return 2;
+	else if (fdata >= 2400000000UL)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
+static inline u64 hdmi_8996_v2_get_hsclk(u64 fdata, u64 vco_range)
+{
+	u64 tmp_calc = vco_range;
+
+	tmp_calc <<= 2;
+	do_div(tmp_calc, 3U);
+	if (fdata >= (vco_range << 2))
+		return 0;
+	else if (fdata >= (vco_range << 1))
+		return 1;
+	else if (fdata >= tmp_calc)
+		return 2;
+	else if (fdata >= vco_range)
+		return 3;
+
+	return HDMI_64B_ERR_VAL;
+}
+
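+/* VCO frequency = bit clock * post divider * TX band divider. */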
+static inline u64 hdmi_8996_v2_get_vco_freq(u64 bclk, u64 vco_range)
+{
+	u64 tx_band_div_ratio = 1U << hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	u64 pll_post_div_ratio;
+
+	if (bclk >= vco_range) {
+		u64 hsclk = hdmi_8996_v2_get_hsclk(bclk, vco_range);
+
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
+	} else {
+		pll_post_div_ratio = hdmi_8996_v2_get_post_div_lt_2g(bclk,
+								vco_range);
+	}
+
+	return bclk * (pll_post_div_ratio * tx_band_div_ratio);
+}
+
+static inline u64 hdmi_8996_v2_get_fdata(u64 bclk, u64 vco_range)
+{
+	u64 tmp_calc, pll_post_div_ratio_lt_2g;
+
+	if (bclk >= vco_range)
+		return bclk;
+
+	pll_post_div_ratio_lt_2g = hdmi_8996_v2_get_post_div_lt_2g(bclk,
+								vco_range);
+	if (pll_post_div_ratio_lt_2g == HDMI_64B_ERR_VAL)
+		return HDMI_64B_ERR_VAL;
+
+	tmp_calc = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+	do_div(tmp_calc, pll_post_div_ratio_lt_2g);
+	return tmp_calc;
+}
+
+static inline u64 hdmi_8996_get_cpctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		/*
+		 * This should be ROUND(11 / (19.2 / 20)) = ROUND(11.46) = 11.
+		 * Since the ref clock does not change, hardcode to 11.
+		 */
+		return 0xB;
+
+	return 0x23;
+}
+
+static inline u64 hdmi_8996_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return 0x16;
+
+	return 0x10;
+}
+
+static inline u64 hdmi_8996_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || (gen_ssc))
+		return 0x28;
+
+	return 0x1;
+}
+
+static inline u64 hdmi_8996_get_integloop_gain(u64 frac_start, bool gen_ssc)
+{
+	if ((frac_start != 0) || gen_ssc)
+		return 0x80;
+
+	return 0xC4;
+}
+
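+/*
+ * v3 integral loop gain: the base gain is shifted up by the digital
+ * clock divider setting and clamped to 0x7FE (presumably the hardware
+ * maximum for this field).
+ */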
+static inline u64 hdmi_8996_v3_get_integloop_gain(u64 frac_start, u64 bclk,
+							bool gen_ssc)
+{
+	u64 digclk_divsel = bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+	u64 base = ((frac_start != 0) || gen_ssc) ? 0x40 : 0xC4;
+
+	base <<= digclk_divsel;
+
+	return (base <= 2046 ? base : 0x7FE);
+}
+
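+/*
+ * VCO tune code, computed as (13000 - fdata * div / 1000000 - 256) / 5;
+ * the constants look like hardware characterisation values for the
+ * tuning DAC.
+ */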
+static inline u64 hdmi_8996_get_vco_tune(u64 fdata, u64 div)
+{
+	u64 vco_tune;
+
+	vco_tune = fdata * div;
+	do_div(vco_tune, 1000000);
+	vco_tune = 13000 - vco_tune - 256;
+	do_div(vco_tune, 5);
+
+	return vco_tune;
+}
+
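+/*
+ * Lock comparator value: ROUND(pll_cmp_cnt * core_clk / ref_clk) - 1;
+ * do_div() returns the remainder, which drives the rounding step.
+ */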
+static inline u64 hdmi_8996_get_pll_cmp(u64 pll_cmp_cnt, u64 core_clk)
+{
+	u64 pll_cmp;
+	u64 rem;
+
+	pll_cmp = pll_cmp_cnt * core_clk;
+	rem = do_div(pll_cmp, HDMI_REF_CLOCK);
+	if (rem > (HDMI_REF_CLOCK >> 1))
+		pll_cmp++;
+	pll_cmp -= 1;
+
+	return pll_cmp;
+}
+
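+/*
+ * v3 lock comparator: same rounded division as above, but computed from
+ * fdata with an extra factor of 10 in the divisor (matching the
+ * core-clock divide ratio used in the earlier variants).
+ */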
+static inline u64 hdmi_8996_v3_get_pll_cmp(u64 pll_cmp_cnt, u64 fdata)
+{
+	u64 dividend = pll_cmp_cnt * fdata;
+	u64 divisor = HDMI_REF_CLOCK * 10;
+	u64 rem;
+
+	rem = do_div(dividend, divisor);
+	if (rem > (divisor >> 1))
+		dividend++;
+
+	return dividend - 1;
+}
+
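+/*
+ * Brute-force search over all ratio/tx-band combinations for the lowest
+ * VCO frequency that still falls within [HDMI_VCO_MIN_FREQ,
+ * HDMI_VCO_MAX_FREQ]; if nothing fits, retry once in half-rate mode
+ * before giving up. The switch below maps the chosen ratio onto the
+ * HSCLK divider select encoding.
+ */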
+static int hdmi_8996_v3_get_post_div(struct hdmi_8996_v3_post_divider *pd,
+						u64 bclk)
+{
+	u32 ratio[] = {2, 3, 4, 5, 6, 9, 10, 12, 14, 15, 20, 21, 25, 28, 35};
+	u32 tx_band_sel[] = {0, 1, 2, 3};
+	u64 vco_freq[60];
+	u64 vco, vco_optimal, half_rate_mode = 0;
+	int vco_optimal_index, vco_freq_index;
+	int i, j, k, x;
+
+	for (i = 0; i <= 1; i++) {
+		vco_optimal = HDMI_VCO_MAX_FREQ;
+		vco_optimal_index = -1;
+		vco_freq_index = 0;
+		for (j = 0; j < 15; j++) {
+			for (k = 0; k < 4; k++) {
+				u64 ratio_mult = ratio[j] << tx_band_sel[k];
+
+				vco = bclk >> half_rate_mode;
+				vco *= ratio_mult;
+				vco_freq[vco_freq_index++] = vco;
+			}
+		}
+
+		for (x = 0; x < 60; x++) {
+			u64 vco_tmp = vco_freq[x];
+
+			if ((vco_tmp >= HDMI_VCO_MIN_FREQ) &&
+					(vco_tmp <= vco_optimal)) {
+				vco_optimal = vco_tmp;
+				vco_optimal_index = x;
+			}
+		}
+
+		if (vco_optimal_index == -1) {
+			if (!half_rate_mode)
+				half_rate_mode++;
+			else
+				return -EINVAL;
+		} else {
+			pd->vco_freq = vco_optimal;
+			pd->tx_band_sel = tx_band_sel[vco_optimal_index % 4];
+			pd->vco_ratio = ratio[vco_optimal_index / 4];
+			break;
+		}
+	}
+
+	switch (pd->vco_ratio) {
+	case 2:
+		pd->hsclk_divsel = 0;
+		break;
+	case 3:
+		pd->hsclk_divsel = 4;
+		break;
+	case 4:
+		pd->hsclk_divsel = 8;
+		break;
+	case 5:
+		pd->hsclk_divsel = 12;
+		break;
+	case 6:
+		pd->hsclk_divsel = 1;
+		break;
+	case 9:
+		pd->hsclk_divsel = 5;
+		break;
+	case 10:
+		pd->hsclk_divsel = 2;
+		break;
+	case 12:
+		pd->hsclk_divsel = 9;
+		break;
+	case 14:
+		pd->hsclk_divsel = 3;
+		break;
+	case 15:
+		pd->hsclk_divsel = 13;
+		break;
+	case 20:
+		pd->hsclk_divsel = 10;
+		break;
+	case 21:
+		pd->hsclk_divsel = 7;
+		break;
+	case 25:
+		pd->hsclk_divsel = 14;
+		break;
+	case 28:
+		pd->hsclk_divsel = 11;
+		break;
+	case 35:
+		pd->hsclk_divsel = 15;
+		break;
+	}
+
+	return 0;
+}
+
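+/*
+ * v1 parameter calculation: derive the bit clock from the pixel clock
+ * (10x), pick the post dividers and TX band, split the feedback divider
+ * into decimal and fractional parts, and translate everything into the
+ * register values collected in cfg.
+ */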
+static int hdmi_8996_v1_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div_gt_2g;
+	u64 post_div_lt_2g;
+	u64 coreclk_div1_lt_2g;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = bclk/4;
+	else
+		tmds_clk = bclk;
+
+	post_div_lt_2g = hdmi_8996_v1_get_post_div_lt_2g(bclk);
+	if (post_div_lt_2g == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	coreclk_div1_lt_2g = hdmi_8996_get_coreclk_div_lt_2g(bclk);
+
+	core_clk_div_ratio = hdmi_8996_get_coreclk_div_ratio(
+				HDMI_CLKS_PLL_DIVSEL, HDMI_CORECLK_DIV);
+
+	tx_band = hdmi_8996_v1_get_tx_band(bclk);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	if (bclk >= HDMI_2400MHZ_BIT_CLK_HZ) {
+		fdata = bclk;
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		vco_freq = bclk * (post_div_gt_2g * tx_band_div_ratio);
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_gt_2g);
+	} else {
+		vco_freq = bclk * (post_div_lt_2g * tx_band_div_ratio);
+		fdata = vco_freq;
+		do_div(fdata, post_div_lt_2g);
+		hsclk = hdmi_8996_v1_get_hsclk(fdata);
+		if (hsclk == HDMI_64B_ERR_VAL)
+			goto fail;
+
+		clk_divtx = vco_freq;
+		do_div(clk_divtx, post_div_lt_2g);
+		post_div_gt_2g = (hsclk <= 3) ? (hsclk + 1) : HDMI_64B_ERR_VAL;
+		if (post_div_gt_2g == HDMI_64B_ERR_VAL)
+			goto fail;
+	}
+
+	/*
+	 * Split the feedback divider fdata * post_div / (4 * ref_clk) into
+	 * its integer part (dec_start) and a 20-bit fraction (frac_start),
+	 * rounded to the nearest integer.
+	 */
+	dec_start = fdata * post_div_gt_2g;
+	do_div(dec_start, pll_divisor);
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div_gt_2g))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div_gt_2g);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+	cfg->tx_l0_res_code_lane_tx = 0x33;
+	cfg->tx_l1_res_code_lane_tx = 0x33;
+	cfg->tx_l2_res_code_lane_tx = 0x33;
+	cfg->tx_l3_res_code_lane_tx = 0x33;
+	cfg->com_restrim_ctrl = 0x0;
+	cfg->com_vco_tune_ctrl = 0x1C;
+
+	cfg->com_svs_mode_clk_sel =
+			(bclk >= HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2);
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+						cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+						cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+						cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+						cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+						cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+						cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+						cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+						cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+						cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+						cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+						cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+						cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+						cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n",	cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l3_res_code_lane_tx);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
+static int hdmi_8996_v2_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	u64 fdata, clk_divtx, tmds_clk;
+	u64 bclk;
+	u64 post_div;
+	u64 core_clk_div;
+	u64 core_clk_div_ratio;
+	u64 core_clk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 tx_band_div_ratio;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_tune;
+	u64 vco_freq;
+	u64 vco_range;
+	u64 rem;
+
+	/* FDATA, CLK_DIVTX, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	vco_range = bclk < HDMI_282MHZ_BIT_CLK_HZ ? HDMI_2000MHZ_BIT_CLK_HZ :
+				HDMI_2250MHZ_BIT_CLK_HZ;
+
+	fdata = hdmi_8996_v2_get_fdata(bclk, vco_range);
+	if (fdata == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	hsclk = hdmi_8996_v2_get_hsclk(fdata, vco_range);
+	if (hsclk == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	if (bclk >= vco_range)
+		post_div = hdmi_8996_v2_get_post_div_gt_2g(hsclk);
+	else
+		post_div = hdmi_8996_v2_get_post_div_lt_2g(bclk, vco_range);
+
+	if (post_div == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	core_clk_div = 5;
+	core_clk_div_ratio = core_clk_div * 2;
+
+	tx_band = hdmi_8996_v2_get_tx_band(bclk, vco_range);
+	if (tx_band == HDMI_64B_ERR_VAL)
+		goto fail;
+
+	tx_band_div_ratio = 1 << tx_band;
+
+	vco_freq = hdmi_8996_v2_get_vco_freq(bclk, vco_range);
+	clk_divtx = vco_freq;
+	do_div(clk_divtx, post_div);
+
+	/* Decimal and fraction values */
+	dec_start = fdata * post_div;
+	do_div(dec_start, pll_divisor);
+	frac_start = ((pll_divisor - (((dec_start + 1) * pll_divisor) -
+			(fdata * post_div))) * (1 << 20));
+	rem = do_div(frac_start, pll_divisor);
+	/* Round off frac_start to closest integer */
+	if (rem >= (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_get_integloop_gain(frac_start, false);
+	vco_tune = hdmi_8996_get_vco_tune(fdata, post_div);
+
+	core_clk = clk_divtx;
+	do_div(core_clk, core_clk_div_ratio);
+	pll_cmp = hdmi_8996_get_pll_cmp(1024, core_clk);
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: CLK_DIVTX: %llu\n", __func__, clk_divtx);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: VCO_TUNE: %llu\n", __func__, vco_tune);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_lane_mode = 0x3;
+	cfg->tx_l2_lane_mode = 0x3;
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x28 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_core_clk_en = (0x6C | (HDMI_CLKS_PLL_DIVSEL << 4));
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0x0;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+		cfg->tx_l0_res_code_lane_tx = 0x39;
+		cfg->tx_l1_res_code_lane_tx = 0x39;
+		cfg->tx_l2_res_code_lane_tx = 0x39;
+		cfg->tx_l3_res_code_lane_tx = 0x39;
+		cfg->com_restrim_ctrl = 0x0;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+		cfg->tx_l0_res_code_lane_tx = 0x3F;
+		cfg->tx_l1_res_code_lane_tx = 0x3F;
+		cfg->tx_l2_res_code_lane_tx = 0x3F;
+		cfg->tx_l3_res_code_lane_tx = 0x3F;
+		cfg->com_restrim_ctrl = 0xD8;
+	}
+
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+						cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_vco_tune_ctrl = 0x%x\n",
+						cfg->com_vco_tune_ctrl);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+						cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+						cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+						cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+						cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+						cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+						cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+						cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+						cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+						cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+						cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+						cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+						cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: tx_l0_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l0_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l1_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l1_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l2_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l2_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: tx_l3_res_code_lane_tx = 0x%x\n",
+					cfg->tx_l3_res_code_lane_tx);
+	DEV_DBG("PLL PARAM: com_restrim_ctrl = 0x%x\n",	cfg->com_restrim_ctrl);
+
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+	rc = 0;
+fail:
+	return rc;
+}
+
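+/*
+ * v3 replaces the closed-form divider selection with the post-divider
+ * search above and derives the decimal/fractional start values directly
+ * from the VCO frequency.
+ */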
+static int hdmi_8996_v3_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg)
+{
+	int rc = -EINVAL;
+	struct hdmi_8996_v3_post_divider pd;
+	u64 fdata, tmds_clk;
+	u64 bclk;
+	u64 pll_cmp;
+	u64 tx_band;
+	u64 hsclk;
+	u64 dec_start;
+	u64 frac_start;
+	u64 pll_divisor = 4 * HDMI_REF_CLOCK;
+	u64 cpctrl;
+	u64 rctrl;
+	u64 cctrl;
+	u64 integloop_gain;
+	u64 vco_freq;
+	u64 rem;
+
+	/* FDATA, HSCLK, PIXEL_CLK, TMDS_CLK */
+	bclk = ((u64)pix_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD)
+		tmds_clk = pix_clk >> 2;
+	else
+		tmds_clk = pix_clk;
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) || !pd.vco_ratio ||
+			!pd.vco_freq)
+		goto fail;
+
+	vco_freq = pd.vco_freq;
+	fdata = pd.vco_freq;
+	do_div(fdata, pd.vco_ratio);
+
+	hsclk = pd.hsclk_divsel;
+	dec_start = vco_freq;
+	do_div(dec_start, pll_divisor);
+
+	frac_start = vco_freq * (1 << 20);
+	rem = do_div(frac_start, pll_divisor);
+	frac_start -= dec_start * (1 << 20);
+	if (rem > (pll_divisor >> 1))
+		frac_start++;
+
+	cpctrl = hdmi_8996_get_cpctrl(frac_start, false);
+	rctrl = hdmi_8996_get_rctrl(frac_start, false);
+	cctrl = hdmi_8996_get_cctrl(frac_start, false);
+	integloop_gain = hdmi_8996_v3_get_integloop_gain(frac_start, bclk,
+									false);
+	pll_cmp = hdmi_8996_v3_get_pll_cmp(1024, fdata);
+	tx_band = pd.tx_band_sel;
+
+	/* Debug dump */
+	DEV_DBG("%s: VCO freq: %llu\n", __func__, vco_freq);
+	DEV_DBG("%s: fdata: %llu\n", __func__, fdata);
+	DEV_DBG("%s: pix_clk: %d\n", __func__, pix_clk);
+	DEV_DBG("%s: tmds clk: %llu\n", __func__, tmds_clk);
+	DEV_DBG("%s: HSCLK_SEL: %llu\n", __func__, hsclk);
+	DEV_DBG("%s: DEC_START: %llu\n", __func__, dec_start);
+	DEV_DBG("%s: DIV_FRAC_START: %llu\n", __func__, frac_start);
+	DEV_DBG("%s: PLL_CPCTRL: %llu\n", __func__, cpctrl);
+	DEV_DBG("%s: PLL_RCTRL: %llu\n", __func__, rctrl);
+	DEV_DBG("%s: PLL_CCTRL: %llu\n", __func__, cctrl);
+	DEV_DBG("%s: INTEGLOOP_GAIN: %llu\n", __func__, integloop_gain);
+	DEV_DBG("%s: TX_BAND: %llu\n", __func__, tx_band);
+	DEV_DBG("%s: PLL_CMP: %llu\n", __func__, pll_cmp);
+
+	/* Convert these values to register specific values */
+	cfg->tx_l0_tx_band = tx_band + 4;
+	cfg->tx_l1_tx_band = tx_band + 4;
+	cfg->tx_l2_tx_band = tx_band + 4;
+	cfg->tx_l3_tx_band = tx_band + 4;
+
+	if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+		cfg->com_svs_mode_clk_sel = 1;
+	else
+		cfg->com_svs_mode_clk_sel = 2;
+
+	cfg->com_hsclk_sel = (0x20 | hsclk);
+	cfg->com_pll_cctrl_mode0 = cctrl;
+	cfg->com_pll_rctrl_mode0 = rctrl;
+	cfg->com_cp_ctrl_mode0 = cpctrl;
+	cfg->com_dec_start_mode0 = dec_start;
+	cfg->com_div_frac_start1_mode0 = (frac_start & 0xFF);
+	cfg->com_div_frac_start2_mode0 = ((frac_start & 0xFF00) >> 8);
+	cfg->com_div_frac_start3_mode0 = ((frac_start & 0xF0000) >> 16);
+	cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xF00) >> 8);
+	cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->com_lock_cmp_en = 0x04;
+	cfg->com_core_clk_en = 0x2C;
+	cfg->com_coreclk_div = HDMI_CORECLK_DIV;
+	cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x10 : 0x0;
+	cfg->com_vco_tune_ctrl = 0x0;
+
+	cfg->tx_l0_lane_mode = 0x43;
+	cfg->tx_l2_lane_mode = 0x43;
+
+	if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x22;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x27;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+		cfg->tx_l0_tx_drv_lvl = 0x25;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l1_tx_drv_lvl = 0x25;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l2_tx_drv_lvl = 0x25;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l3_tx_drv_lvl = 0x25;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x23;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0D;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0D;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0D;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x00;
+	} else {
+		cfg->tx_l0_tx_drv_lvl = 0x20;
+		cfg->tx_l0_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l1_tx_drv_lvl = 0x20;
+		cfg->tx_l1_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l2_tx_drv_lvl = 0x20;
+		cfg->tx_l2_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l3_tx_drv_lvl = 0x20;
+		cfg->tx_l3_tx_emp_post1_lvl = 0x20;
+		cfg->tx_l0_vmode_ctrl1 = 0x00;
+		cfg->tx_l0_vmode_ctrl2 = 0x0E;
+		cfg->tx_l1_vmode_ctrl1 = 0x00;
+		cfg->tx_l1_vmode_ctrl2 = 0x0E;
+		cfg->tx_l2_vmode_ctrl1 = 0x00;
+		cfg->tx_l2_vmode_ctrl2 = 0x0E;
+		cfg->tx_l3_vmode_ctrl1 = 0x00;
+		cfg->tx_l3_vmode_ctrl2 = 0x0E;
+	}
+
+	DEV_DBG("HDMI 8996 PLL: PLL Settings\n");
+	DEV_DBG("PLL PARAM: tx_l0_tx_band = 0x%x\n", cfg->tx_l0_tx_band);
+	DEV_DBG("PLL PARAM: tx_l1_tx_band = 0x%x\n", cfg->tx_l1_tx_band);
+	DEV_DBG("PLL PARAM: tx_l2_tx_band = 0x%x\n", cfg->tx_l2_tx_band);
+	DEV_DBG("PLL PARAM: tx_l3_tx_band = 0x%x\n", cfg->tx_l3_tx_band);
+	DEV_DBG("PLL PARAM: com_svs_mode_clk_sel = 0x%x\n",
+						cfg->com_svs_mode_clk_sel);
+	DEV_DBG("PLL PARAM: com_hsclk_sel = 0x%x\n", cfg->com_hsclk_sel);
+	DEV_DBG("PLL PARAM: com_lock_cmp_en = 0x%x\n", cfg->com_lock_cmp_en);
+	DEV_DBG("PLL PARAM: com_pll_cctrl_mode0 = 0x%x\n",
+						cfg->com_pll_cctrl_mode0);
+	DEV_DBG("PLL PARAM: com_pll_rctrl_mode0 = 0x%x\n",
+						cfg->com_pll_rctrl_mode0);
+	DEV_DBG("PLL PARAM: com_cp_ctrl_mode0 = 0x%x\n",
+						cfg->com_cp_ctrl_mode0);
+	DEV_DBG("PLL PARAM: com_dec_start_mode0 = 0x%x\n",
+						cfg->com_dec_start_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start1_mode0 = 0x%x\n",
+						cfg->com_div_frac_start1_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start2_mode0 = 0x%x\n",
+						cfg->com_div_frac_start2_mode0);
+	DEV_DBG("PLL PARAM: com_div_frac_start3_mode0 = 0x%x\n",
+						cfg->com_div_frac_start3_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain0_mode0 = 0x%x\n",
+						cfg->com_integloop_gain0_mode0);
+	DEV_DBG("PLL PARAM: com_integloop_gain1_mode0 = 0x%x\n",
+						cfg->com_integloop_gain1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp1_mode0 = 0x%x\n",
+						cfg->com_lock_cmp1_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp2_mode0 = 0x%x\n",
+						cfg->com_lock_cmp2_mode0);
+	DEV_DBG("PLL PARAM: com_lock_cmp3_mode0 = 0x%x\n",
+						cfg->com_lock_cmp3_mode0);
+	DEV_DBG("PLL PARAM: com_core_clk_en = 0x%x\n", cfg->com_core_clk_en);
+	DEV_DBG("PLL PARAM: com_coreclk_div = 0x%x\n", cfg->com_coreclk_div);
+	DEV_DBG("PLL PARAM: phy_mode = 0x%x\n", cfg->phy_mode);
+
+	DEV_DBG("PLL PARAM: tx_l0_lane_mode = 0x%x\n", cfg->tx_l0_lane_mode);
+	DEV_DBG("PLL PARAM: tx_l2_lane_mode = 0x%x\n", cfg->tx_l2_lane_mode);
+	DEV_DBG("PLL PARAM: l0_tx_drv_lvl = 0x%x\n", cfg->tx_l0_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l0_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l0_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_drv_lvl = 0x%x\n", cfg->tx_l1_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l1_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l1_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_drv_lvl = 0x%x\n", cfg->tx_l2_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l2_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l2_tx_emp_post1_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_drv_lvl = 0x%x\n", cfg->tx_l3_tx_drv_lvl);
+	DEV_DBG("PLL PARAM: l3_tx_emp_post1_lvl = 0x%x\n",
+						cfg->tx_l3_tx_emp_post1_lvl);
+
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl1 = 0x%x\n", cfg->tx_l0_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l0_vmode_ctrl2 = 0x%x\n", cfg->tx_l0_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl1 = 0x%x\n", cfg->tx_l1_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l1_vmode_ctrl2 = 0x%x\n", cfg->tx_l1_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl1 = 0x%x\n", cfg->tx_l2_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l2_vmode_ctrl2 = 0x%x\n", cfg->tx_l2_vmode_ctrl2);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl1 = 0x%x\n", cfg->tx_l3_vmode_ctrl1);
+	DEV_DBG("PLL PARAM: l3_vmode_ctrl2 = 0x%x\n", cfg->tx_l3_vmode_ctrl2);
+	rc = 0;
+fail:
+	return rc;
+}
+
+static int hdmi_8996_calculate(u32 pix_clk,
+			       struct hdmi_8996_phy_pll_reg_cfg *cfg, u32 ver)
+{
+	switch (ver) {
+	case HDMI_VERSION_8996_V3:
+	case HDMI_VERSION_8996_V3_1_8:
+		return hdmi_8996_v3_calculate(pix_clk, cfg);
+	case HDMI_VERSION_8996_V2:
+		return hdmi_8996_v2_calculate(pix_clk, cfg);
+	default:
+		return hdmi_8996_v1_calculate(pix_clk, cfg);
+	}
+}
+
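+/*
+ * Full PHY/PLL programming sequence: compute the per-rate register
+ * configuration, shut the PHY down, then program the serdes common
+ * block, per-lane TX blocks and PHY registers, finishing with a write
+ * barrier before the PLL is enabled.
+ */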
+static int hdmi_8996_phy_pll_set_clk_rate(struct clk *c, u32 tmds_clk, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+
+	rc = hdmi_8996_calculate(tmds_clk, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		return rc;
+	}
+
+	/* Initially shut down PHY */
+	DEV_DBG("%s: Disabling PHY\n", __func__);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x0);
+	udelay(500);
+
+	/* Power up sequence */
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+	case HDMI_VERSION_8996_V3:
+	case HDMI_VERSION_8996_V3_1_8:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x04);
+		break;
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESETSM_CNTRL, 0x20);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX0_TX1_LANE_CTL, 0x0F);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TX2_TX3_LANE_CTL, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				     QSERDES_TX_L0_CLKBUF_ENABLE, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		     QSERDES_TX_L0_LANE_MODE, cfg.tx_l0_lane_mode);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		     QSERDES_TX_L0_LANE_MODE, cfg.tx_l2_lane_mode);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l0_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l1_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l2_tx_band);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		     QSERDES_TX_L0_TX_BAND, cfg.tx_l3_tx_band);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_RESET_TSYNC_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_BUF_ENABLE, 0x1E);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x07);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SYS_CLK_CTRL, 0x02);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_ENABLE1, 0x0E);
+	if (ver == HDMI_VERSION_8996_V1)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+
+	/* Bypass VCO calibration */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+					cfg.com_svs_mode_clk_sel);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_TRIM, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_IVCO, 0x0F);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_CTRL,
+			cfg.com_vco_tune_ctrl);
+
+	switch (ver) {
+	case HDMI_VERSION_8996_V1:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SVS_MODE_CLK_SEL,
+					cfg.com_svs_mode_clk_sel);
+		break;
+	default:
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_BG_CTRL, 0x06);
+	}
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CLK_SELECT, 0x30);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_HSCLK_SEL,
+		       cfg.com_hsclk_sel);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_EN,
+		       cfg.com_lock_cmp_en);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_CCTRL_MODE0,
+		       cfg.com_pll_cctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_PLL_RCTRL_MODE0,
+		       cfg.com_pll_rctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CP_CTRL_MODE0,
+		       cfg.com_cp_ctrl_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+			cfg.com_integloop_gain0_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+			cfg.com_integloop_gain1_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0,
+			cfg.com_lock_cmp1_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0,
+			cfg.com_lock_cmp2_mode0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0,
+			cfg.com_lock_cmp3_mode0);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE_MAP, 0x00);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORE_CLK_EN,
+		       cfg.com_core_clk_en);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CORECLK_DIV,
+		       cfg.com_coreclk_div);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_CONFIG, 0x02);
+
+	if (ver == HDMI_VERSION_8996_V3 || ver == HDMI_VERSION_8996_V3_1_8)
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESCODE_DIV_NUM, 0x15);
+
+	/* TX lanes setup (TX 0/1/2/3) */
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l0_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l0_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l1_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l1_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000023);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l2_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l2_tx_emp_post1_lvl);
+
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				0x00000020);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_TX_DRV_LVL,
+				cfg.tx_l3_tx_drv_lvl);
+	}
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_EMP_POST1_LVL,
+		       cfg.tx_l3_tx_emp_post1_lvl);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l0_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l0_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l1_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL2,
+		       cfg.tx_l1_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l2_vmode_ctrl1);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			QSERDES_TX_L0_VMODE_CTRL2,
+			cfg.tx_l2_vmode_ctrl2);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_VMODE_CTRL1,
+		       cfg.tx_l3_vmode_ctrl1);
+	if (ver == HDMI_VERSION_8996_V3_1_8) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_VMODE_CTRL2,
+				0x0000000D);
+	} else {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+				QSERDES_TX_L0_VMODE_CTRL2,
+				cfg.tx_l3_vmode_ctrl2);
+	}
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_TX_DRV_LVL_OFFSET, 0x00);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_RES_CODE_LANE_OFFSET, 0x00);
+
+	if (ver < HDMI_VERSION_8996_V3) {
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l0_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l1_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l2_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			       QSERDES_TX_L0_RES_CODE_LANE_TX,
+			       cfg.tx_l3_res_code_lane_tx);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_RESTRIM_CTRL,
+			       cfg.com_restrim_ctrl);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG0, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_TXCAL_CFG1, 0x05);
+	}
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_MODE, cfg.phy_mode);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_PD_CTL, 0x1F);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			QSERDES_TX_L0_TRAN_DRVR_EMP_EN, 0x03);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+			QSERDES_TX_L0_PARRATE_REC_DETECT_IDLE_EN, 0x40);
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x0C);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HP_PD_ENABLES, 0x03);
+
+	if (ver == HDMI_VERSION_8996_V2) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x01);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x01);
+	}
+	/*
+	 * Ensure that vco configuration gets flushed to hardware before
+	 * enabling the PLL
+	 */
+	wmb();
+	return 0;
+}
+
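+/*
+ * Poll HDMI_PHY_STATUS bit 0 for up to HDMI_PLL_POLL_MAX_READS *
+ * HDMI_PLL_POLL_TIMEOUT_US microseconds (100 * 1500 us = 150 ms) for
+ * the PHY ready indication.
+ */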
+static int hdmi_8996_phy_ready_status(struct mdss_pll_resources *io)
+{
+	u32 status = 0;
+	int phy_ready = 0;
+	int rc;
+	u32 read_count = 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PHY Ready\n", __func__);
+
+	/* Poll for PHY ready status */
+	while (read_count < HDMI_PLL_POLL_MAX_READS) {
+		status = MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS);
+		if ((status & BIT(0)) == 1) {
+			phy_ready = 1;
+			DEV_DBG("%s: PHY READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == HDMI_PLL_POLL_MAX_READS) {
+		phy_ready = 0;
+		DEV_DBG("%s: PHY READY TIMEOUT\n", __func__);
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	return phy_ready;
+}
+
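+/* Same polling scheme as above, but against the PLL C_READY status bit. */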
+static int hdmi_8996_pll_lock_status(struct mdss_pll_resources *io)
+{
+	u32 status;
+	int pll_locked = 0;
+	int rc;
+	u32 read_count = 0;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return rc;
+	}
+
+	DEV_DBG("%s: Waiting for PLL lock\n", __func__);
+
+	while (read_count < HDMI_PLL_POLL_MAX_READS) {
+		status = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if ((status & BIT(0)) == 1) {
+			pll_locked = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == HDMI_PLL_POLL_MAX_READS) {
+		pll_locked = 0;
+		DEV_DBG("%s: C READY TIMEOUT\n", __func__);
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	return pll_locked;
+}
+
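+/*
+ * v1 software calibration, in outline: binary-search the VCO tune code
+ * (VCO_TUNE1/2_MODE0, up to 10 bits). Each iteration programs a candidate
+ * code, pulses the lock-comparator enable (LOCK_CMP_CFG bit 1) to restart
+ * the frequency counter, reads the count back through DEBUG_BUS0..3 and
+ * compares it with the expected count preloaded in LOCK_CMP1..3_MODE0.
+ * The search window starts at [0x0, 0x190] and halves each step; once it
+ * collapses to one or two codes, the code whose count is closest to the
+ * expected value is committed.
+ */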
+static int hdmi_8996_v1_perform_sw_calibration(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	u32 max_code = 0x190;
+	u32 min_code = 0x0;
+	u32 max_cnt = 0;
+	u32 min_cnt = 0;
+	u32 expected_counter_value = 0;
+	u32 step = 0;
+	u32 dbus_all = 0;
+	u32 dbus_sel = 0;
+	u32 vco_code = 0;
+	u32 val = 0;
+
+	vco_code = 0xC8;
+
+	DEV_DBG("%s: Starting SW calibration with vco_code = %d\n", __func__,
+		 vco_code);
+
+	expected_counter_value =
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP3_MODE0) << 16) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP2_MODE0) << 8) |
+	   (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP1_MODE0));
+
+	DEV_DBG("%s: expected_counter_value = %d\n", __func__,
+		 expected_counter_value);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x4);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val |= BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	udelay(60);
+
+	while (1) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+			       vco_code & 0xFF);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+			       (vco_code >> 8) & 0x3);
+
+		udelay(20);
+
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val &= ~BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+		val |= BIT(1);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+		udelay(60);
+
+		dbus_all =
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS3) << 24) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS2) << 16) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS1) << 8) |
+		  (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEBUG_BUS0));
+
+		dbus_sel = (dbus_all >> 9) & 0x3FFFF;
+		DEV_DBG("%s: loop[%d], dbus_all = 0x%x, dbus_sel = 0x%x\n",
+			__func__, step, dbus_all, dbus_sel);
+		if (dbus_sel == 0)
+			DEV_ERR("%s: CHECK HDMI REF CLK\n", __func__);
+
+		if (dbus_sel == expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+		} else if (dbus_sel == 0) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel > expected_counter_value) {
+			min_code = vco_code;
+			min_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		} else if (dbus_sel < expected_counter_value) {
+			max_code = vco_code;
+			max_cnt = dbus_sel;
+			vco_code = (max_code + min_code)/2;
+		}
+
+		step++;
+
+		if ((vco_code == 0) || (vco_code == 0x3FF) || (step > 0x3FF)) {
+			DEV_ERR("%s: VCO tune code search failed\n", __func__);
+			rc = -ENOTSUPP;
+			break;
+		}
+		if ((max_code - min_code) <= 1) {
+			if ((max_code - min_code) == 1) {
+				if (abs((int)(max_cnt - expected_counter_value))
+				    < abs((int)(min_cnt - expected_counter_value
+					))) {
+					vco_code = max_code;
+				} else {
+					vco_code = min_code;
+				}
+			}
+			break;
+		}
+		DEV_DBG("%s: loop[%d], new vco_code = %d\n", __func__, step,
+			 vco_code);
+	}
+
+	DEV_DBG("%s: CALIB done. vco_code = %d\n", __func__, vco_code);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE1_MODE0,
+		       vco_code & 0xFF);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_VCO_TUNE2_MODE0,
+		       (vco_code >> 8) & 0x3);
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_LOCK_CMP_CFG);
+	val &= ~BIT(1);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_LOCK_CMP_CFG, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val |= BIT(4);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	val = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_CMN_MISC1);
+	val &= ~BIT(3);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_CMN_MISC1, val);
+
+	return rc;
+}
+
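+/*
+ * On v2 hardware the tune-code search is done by the hardware calibration
+ * state machine, so this path is in effect a recovery check: poll C_READY
+ * for twice the usual timeout, then sample the calibration result
+ * (PLLCAL_CODE1/2_STATUS) and the integral loop value from the debug bus.
+ * If the PLL never became ready, or the captured codes look saturated
+ * (code1 == 0xFF, code2 low bits == 0x1, integral loop above 0xC0), cycle
+ * HDMI_PHY_CFG (0x17 -> 0x11 -> 0x19) to force a recalibration.
+ */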
+static int hdmi_8996_v2_perform_sw_calibration(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	u32 vco_code1, vco_code2, integral_loop, ready_poll;
+	u32 read_count = 0;
+
+	while (read_count < (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_C_READY_STATUS);
+		if ((ready_poll & BIT(0)) == 1) {
+			ready_poll = 1;
+			DEV_DBG("%s: C READY\n", __func__);
+			break;
+		}
+		udelay(HDMI_PLL_POLL_TIMEOUT_US);
+		read_count++;
+	}
+
+	if (read_count == (HDMI_PLL_POLL_MAX_READS << 1)) {
+		ready_poll = 0;
+		DEV_DBG("%s: C READY TIMEOUT, TRYING SW CALIBRATION\n",
+								__func__);
+	}
+
+	vco_code1 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE1_STATUS);
+	vco_code2 = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_PLLCAL_CODE2_STATUS);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_DEBUG_BUS_SEL, 0x5);
+	integral_loop = MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DEBUG_BUS0);
+
+	if (((ready_poll & 0x1) == 0) || (((ready_poll & 1) == 1) &&
+			(vco_code1 == 0xFF) && ((vco_code2 & 0x3) == 0x1) &&
+			(integral_loop > 0xC0))) {
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL1, 0x04);
+		MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_ATB_SEL2, 0x00);
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x17);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x11);
+		udelay(100);
+
+		MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	}
+	return 0;
+}
+
+static int hdmi_8996_perform_sw_calibration(struct clk *c, u32 ver)
+{
+	switch (ver) {
+	case HDMI_VERSION_8996_V1:
+		return hdmi_8996_v1_perform_sw_calibration(c);
+	case HDMI_VERSION_8996_V2:
+		return hdmi_8996_v2_perform_sw_calibration(c);
+	}
+	return 0;
+}
+
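+/*
+ * Enable sequence: start the PLL via HDMI_PHY_CFG (0x1, then 0x19), run the
+ * software calibration needed by the silicon revision, and wait for lock on
+ * C_READY_STATUS. Only then are the per-lane bias/driver enables programmed,
+ * SSC forced off, PHY readiness confirmed, and the retiming buffer restarted
+ * with a 0x18 -> 0x19 toggle of HDMI_PHY_CFG.
+ */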
+static int hdmi_8996_vco_enable(struct clk *c, u32 ver)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x1);
+	udelay(100);
+
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+	udelay(100);
+
+	rc = hdmi_8996_perform_sw_calibration(c, ver);
+	if (rc) {
+		DEV_ERR("%s: software calibration failed\n", __func__);
+		return rc;
+	}
+
+	rc = hdmi_8996_pll_lock_status(io);
+	if (!rc) {
+		DEV_ERR("%s: PLL not locked\n", __func__);
+		return rc;
+	}
+
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L1_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L2_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+	MDSS_PLL_REG_W(io->pll_base + HDMI_TX_L3_BASE_OFFSET,
+		       QSERDES_TX_L0_HIGHZ_TRANSCEIVEREN_BIAS_DRVR_EN,
+		       0x6F);
+
+	/* Disable SSC */
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_PER2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE1, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_STEP_SIZE2, 0x0);
+	MDSS_PLL_REG_W(io->pll_base, QSERDES_COM_SSC_EN_CENTER, 0x2);
+
+	rc = hdmi_8996_phy_ready_status(io);
+	if (!rc) {
+		DEV_ERR("%s: PHY not READY\n", __func__);
+		return rc;
+	}
+
+	/* Restart the retiming buffer */
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x18);
+	udelay(1);
+	MDSS_PLL_REG_W(io->phy_base, HDMI_PHY_CFG, 0x19);
+
+	io->pll_on = true;
+	return 0;
+}
+
+static int hdmi_8996_v1_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_enable(struct clk *c)
+{
+	return hdmi_8996_vco_enable(c, HDMI_VERSION_8996_V3_1_8);
+}
+
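+/*
+ * Estimate how far, in ppm of the current rate, a requested rate may deviate
+ * while the PLL can be retuned without a full relock. In outline:
+ *
+ *   ppm_range = ((rng * ref_clk) / cmp_cnt) / (vco_freq / vco_ratio)
+ *               * coreclk_div * clks_pll_divsel
+ *
+ * with rng = 64 and cmp_cnt = 1024 fixed, and vco_freq/vco_ratio taken from
+ * the post-divider settings computed for the requested pixel clock. The
+ * caller treats a non-positive return as "no atomic update possible".
+ */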
+static int hdmi_8996_vco_get_lock_range(struct clk *c, unsigned long pixel_clk)
+{
+	u32 rng = 64, cmp_cnt = 1024;
+	u32 coreclk_div = 5, clks_pll_divsel = 2;
+	u32 vco_freq, vco_ratio, ppm_range;
+	u64 bclk;
+	struct hdmi_8996_v3_post_divider pd;
+
+	bclk = ((u64)pixel_clk) * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	DEV_DBG("%s: rate=%ld\n", __func__, pixel_clk);
+
+	if (hdmi_8996_v3_get_post_div(&pd, bclk) ||
+		pd.vco_ratio <= 0 || pd.vco_freq <= 0) {
+		DEV_ERR("%s: couldn't get post div\n", __func__);
+		return -EINVAL;
+	}
+
+	do_div(pd.vco_freq, HDMI_KHZ_TO_HZ * HDMI_KHZ_TO_HZ);
+
+	vco_freq  = (u32) pd.vco_freq;
+	vco_ratio = (u32) pd.vco_ratio;
+
+	DEV_DBG("%s: freq %d, ratio %d\n", __func__,
+		vco_freq, vco_ratio);
+
+	ppm_range = (rng * HDMI_REF_CLOCK) / cmp_cnt;
+	ppm_range /= vco_freq / vco_ratio;
+	ppm_range *= coreclk_div * clks_pll_divsel;
+
+	DEV_DBG("%s: ppm range: %d\n", __func__, ppm_range);
+
+	return ppm_range;
+}
+
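+/*
+ * Atomic rate update: retune an already-locked PLL by reprogramming only the
+ * integer (DEC_START_MODE0) and fractional (DIV_FRAC_START1..3_MODE0)
+ * dividers, then latching them with a 0x01/0x00 toggle of
+ * QSERDES_COM_FREQ_UPDATE. No PHY power-down or recalibration is involved,
+ * unlike the full set_rate path.
+ */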
+static int hdmi_8996_vco_rate_atomic_update(struct clk *c,
+	unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *pll;
+	struct hdmi_8996_phy_pll_reg_cfg cfg = {0};
+	int rc = 0;
+
+	rc = hdmi_8996_calculate(rate, &cfg, ver);
+	if (rc) {
+		DEV_ERR("%s: PLL calculation failed\n", __func__);
+		goto end;
+	}
+
+	pll = io->pll_base;
+
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DEC_START_MODE0,
+		       cfg.com_dec_start_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START1_MODE0,
+		       cfg.com_div_frac_start1_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START2_MODE0,
+		       cfg.com_div_frac_start2_mode0);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_DIV_FRAC_START3_MODE0,
+		       cfg.com_div_frac_start3_mode0);
+
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x01);
+	MDSS_PLL_REG_W(pll, QSERDES_COM_FREQ_UPDATE, 0x00);
+
+	DEV_DBG("%s: updated to rate %ld\n", __func__, rate);
+end:
+	return rc;
+}
+
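+/*
+ * Rate-change policy: if both C_READY_STATUS and HDMI_PHY_STATUS report the
+ * link up, and the requested rate is within the PLL lock range of the
+ * current rate, use the atomic divider update above. Otherwise reprogram the
+ * PLL from scratch and, if it was running, re-enable it afterwards
+ * (set_power_dwn).
+ */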
+static int hdmi_8996_vco_set_rate(struct clk *c, unsigned long rate, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	unsigned int set_power_dwn = 0;
+	bool atomic_update = false;
+	int rc, pll_lock_range;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	DEV_DBG("%s: rate %ld\n", __func__, rate);
+
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0) &&
+		MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+		pll_lock_range = hdmi_8996_vco_get_lock_range(c, vco->rate);
+
+		if (pll_lock_range > 0 && vco->rate) {
+			u32 range_limit;
+
+			range_limit  = vco->rate *
+				(pll_lock_range / HDMI_KHZ_TO_HZ);
+			range_limit /= HDMI_KHZ_TO_HZ;
+
+			DEV_DBG("%s: range limit %d\n", __func__, range_limit);
+
+			if (abs(rate - vco->rate) < range_limit)
+				atomic_update = true;
+		}
+	}
+
+	if (io->pll_on && !atomic_update)
+		set_power_dwn = 1;
+
+	if (atomic_update) {
+		hdmi_8996_vco_rate_atomic_update(c, rate, ver);
+	} else {
+		rc = hdmi_8996_phy_pll_set_clk_rate(c, rate, ver);
+		if (rc)
+			DEV_ERR("%s: Failed to set clk rate\n", __func__);
+	}
+
+	mdss_pll_resource_enable(io, false);
+
+	if (set_power_dwn)
+		hdmi_8996_vco_enable(c, ver);
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+	return 0;
+}
+
+static int hdmi_8996_v1_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	return hdmi_8996_vco_set_rate(c, rate, HDMI_VERSION_8996_V3_1_8);
+}
+
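+/*
+ * The table below appears to follow a simple encoding: the 4-bit HSCLK_SEL
+ * value decodes as divisor = base * mult, with base = {2, 3, 4, 5} selected
+ * by bits [3:2] and mult = {1, 3, 5, 7} selected by bits [1:0]. Values 6
+ * (3 * 5) and 13 (5 * 3) both decode to 15, hence the shared case label.
+ */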
+static unsigned long hdmi_get_hsclk_sel_divisor(unsigned long hsclk_sel)
+{
+	unsigned long divisor;
+
+	switch (hsclk_sel) {
+	case 0:
+		divisor = 2;
+		break;
+	case 1:
+		divisor = 6;
+		break;
+	case 2:
+		divisor = 10;
+		break;
+	case 3:
+		divisor = 14;
+		break;
+	case 4:
+		divisor = 3;
+		break;
+	case 5:
+		divisor = 9;
+		break;
+	case 6:
+	case 13:
+		divisor = 15;
+		break;
+	case 7:
+		divisor = 21;
+		break;
+	case 8:
+		divisor = 4;
+		break;
+	case 9:
+		divisor = 12;
+		break;
+	case 10:
+		divisor = 20;
+		break;
+	case 11:
+		divisor = 28;
+		break;
+	case 12:
+		divisor = 5;
+		break;
+	case 14:
+		divisor = 25;
+		break;
+	case 15:
+		divisor = 35;
+		break;
+	default:
+		divisor = 1;
+		DEV_ERR("%s: invalid hsclk_sel value = %lu\n",
+				__func__, hsclk_sel);
+		break;
+	}
+
+	return divisor;
+}
+
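+/*
+ * Rate read-back reverses the programming math. With N = dec_start and
+ * f = div_frac_start (a 20-bit fraction):
+ *
+ *   vco_clock = (N + f / 2^20) * 4 * ref_clk
+ *   pixel_clk = vco_clock / (10 * hsclk_divisor * 2^tx_band)
+ *
+ * where 10 is the HDMI bit-to-pixel clock ratio and hsclk_divisor is decoded
+ * from HSCLK_SEL by the helper above.
+ */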
+static unsigned long hdmi_8996_vco_get_rate(struct clk *c)
+{
+	unsigned long freq = 0, hsclk_sel = 0, tx_band = 0, dec_start = 0,
+		      div_frac_start = 0, vco_clock_freq = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("%s: pll resource can't be enabled\n", __func__);
+		return freq;
+	}
+
+	dec_start = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_DEC_START_MODE0);
+
+	div_frac_start =
+		MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DIV_FRAC_START1_MODE0) |
+		MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DIV_FRAC_START2_MODE0) << 8 |
+		MDSS_PLL_REG_R(io->pll_base,
+				QSERDES_COM_DIV_FRAC_START3_MODE0) << 16;
+
+	vco_clock_freq = (dec_start + (div_frac_start / (1 << 20)))
+		* 4 * (HDMI_REF_CLOCK);
+
+	hsclk_sel = MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_HSCLK_SEL) & 0x15;
+	hsclk_sel = hdmi_get_hsclk_sel_divisor(hsclk_sel);
+	tx_band = MDSS_PLL_REG_R(io->pll_base + HDMI_TX_L0_BASE_OFFSET,
+			QSERDES_TX_L0_TX_BAND) & 0x3;
+
+	freq = vco_clock_freq / (10 * hsclk_sel * (1 << tx_band));
+
+	mdss_pll_resource_enable(io, false);
+
+	DEV_DBG("%s: freq = %lu\n", __func__, freq);
+
+	return freq;
+}
+
+static long hdmi_8996_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+
+	DEV_DBG("rrate=%ld\n", rrate);
+
+	return rrate;
+}
+
+static int hdmi_8996_vco_prepare(struct clk *c, u32 ver)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int ret = 0;
+
+	DEV_DBG("rate=%ld\n", vco->rate);
+
+	if (!vco->rate_set && vco->rate)
+		ret = hdmi_8996_vco_set_rate(c, vco->rate, ver);
+
+	if (!ret) {
+		ret = mdss_pll_resource_enable(io, true);
+		if (ret)
+			DEV_ERR("pll resource can't be enabled\n");
+	}
+
+	return ret;
+}
+
+static int hdmi_8996_v1_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V1);
+}
+
+static int hdmi_8996_v2_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V2);
+}
+
+static int hdmi_8996_v3_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3);
+}
+
+static int hdmi_8996_v3_1p8_vco_prepare(struct clk *c)
+{
+	return hdmi_8996_vco_prepare(c, HDMI_VERSION_8996_V3_1_8);
+}
+
+static void hdmi_8996_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	vco->rate_set = false;
+
+	if (!io) {
+		DEV_ERR("Invalid input parameter\n");
+		return;
+	}
+
+	if (!io->pll_on &&
+		mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return;
+	}
+
+	io->handoff_resources = false;
+	mdss_pll_resource_enable(io, false);
+	io->pll_on = false;
+}
+
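+/*
+ * Handoff probes whether the bootloader left the PLL running: if both the
+ * PLL (C_READY_STATUS) and the PHY (HDMI_PHY_STATUS) report ready, the
+ * current rate is read back from hardware and the clock is handed off as
+ * enabled; otherwise the temporarily taken resource reference is dropped and
+ * the clock is reported disabled.
+ */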
+static enum handoff hdmi_8996_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_8996_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (is_gdsc_disabled(io))
+		return HANDOFF_DISABLED_CLK;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		DEV_ERR("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	io->handoff_resources = true;
+
+	if (MDSS_PLL_REG_R(io->pll_base, QSERDES_COM_C_READY_STATUS) & BIT(0)) {
+		if (MDSS_PLL_REG_R(io->phy_base, HDMI_PHY_STATUS) & BIT(0)) {
+			io->pll_on = true;
+			c->rate = hdmi_8996_vco_get_rate(c);
+			vco->rate = c->rate;
+			ret = HANDOFF_ENABLED_CLK;
+		} else {
+			io->handoff_resources = false;
+			mdss_pll_resource_enable(io, false);
+			DEV_DBG("%s: PHY not ready\n", __func__);
+		}
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+		DEV_DBG("%s: PLL not locked\n", __func__);
+	}
+
+	DEV_DBG("done, ret=%d\n", ret);
+	return ret;
+}
+
+static const struct clk_ops hdmi_8996_v1_vco_clk_ops = {
+	.enable = hdmi_8996_v1_vco_enable,
+	.set_rate = hdmi_8996_v1_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v1_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static const struct clk_ops hdmi_8996_v2_vco_clk_ops = {
+	.enable = hdmi_8996_v2_vco_enable,
+	.set_rate = hdmi_8996_v2_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v2_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static const struct clk_ops hdmi_8996_v3_vco_clk_ops = {
+	.enable = hdmi_8996_v3_vco_enable,
+	.set_rate = hdmi_8996_v3_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+static const struct clk_ops hdmi_8996_v3_1p8_vco_clk_ops = {
+	.enable = hdmi_8996_v3_1p8_vco_enable,
+	.set_rate = hdmi_8996_v3_1p8_vco_set_rate,
+	.get_rate = hdmi_8996_vco_get_rate,
+	.round_rate = hdmi_8996_vco_round_rate,
+	.prepare = hdmi_8996_v3_1p8_vco_prepare,
+	.unprepare = hdmi_8996_vco_unprepare,
+	.handoff = hdmi_8996_vco_handoff,
+};
+
+
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+	.c = {
+		.dbg_name = "hdmi_8996_vco_clk",
+		.ops = &hdmi_8996_v1_vco_clk_ops,
+		CLK_INIT(hdmi_vco_clk.c),
+	},
+};
+
+static struct clk_lookup hdmipllcc_8996[] = {
+	CLK_LIST(hdmi_vco_clk),
+};
+
+int hdmi_8996_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res, u32 ver)
+{
+	int rc = -ENOTSUPP;
+
+	if (!pll_res || !pll_res->phy_base || !pll_res->pll_base) {
+		DEV_ERR("%s: Invalid input parameters\n", __func__);
+		return -EPROBE_DEFER;
+	}
+
+	/* Set client data for vco, mux and div clocks */
+	hdmi_vco_clk.priv = pll_res;
+
+	switch (ver) {
+	case HDMI_VERSION_8996_V2:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v2_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_vco_clk_ops;
+		break;
+	case HDMI_VERSION_8996_V3_1_8:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v3_1p8_vco_clk_ops;
+		break;
+	default:
+		hdmi_vco_clk.c.ops = &hdmi_8996_v1_vco_clk_ops;
+		break;
+	}
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8996,
+					ARRAY_SIZE(hdmipllcc_8996));
+	if (rc) {
+		DEV_ERR("%s: Clock register failed rc=%d\n", __func__, rc);
+		rc = -EPROBE_DEFER;
+	} else {
+		DEV_DBG("%s SUCCESS\n", __func__);
+	}
+
+	return rc;
+}
+
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V1);
+}
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V2);
+}
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3);
+}
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res)
+{
+	return hdmi_8996_pll_clock_register(pdev, pll_res,
+						HDMI_VERSION_8996_V3_1_8);
+}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll-8998.c b/drivers/clk/qcom/mdss/mdss-hdmi-pll-8998.c
new file mode 100644
index 0000000..eda364a
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-hdmi-pll-8998.c
@@ -0,0 +1,827 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/clk/msm-clk-provider.h>
+#include <linux/clk/msm-clk.h>
+#include <linux/clk/msm-clock-generic.h>
+#include <dt-bindings/clock/msm-clocks-8998.h>
+
+#include "mdss-pll.h"
+#include "mdss-hdmi-pll.h"
+
+#define _W(x, y, z) MDSS_PLL_REG_W(x, y, z)
+#define _R(x, y)    MDSS_PLL_REG_R(x, y)
+
+/* PLL REGISTERS */
+#define BIAS_EN_CLKBUFLR_EN          (0x034)
+#define CLK_ENABLE1                  (0x038)
+#define SYS_CLK_CTRL                 (0x03C)
+#define SYSCLK_BUF_ENABLE            (0x040)
+#define PLL_IVCO                     (0x048)
+#define CP_CTRL_MODE0                (0x060)
+#define PLL_RCTRL_MODE0              (0x068)
+#define PLL_CCTRL_MODE0              (0x070)
+#define SYSCLK_EN_SEL                (0x080)
+#define RESETSM_CNTRL                (0x088)
+#define LOCK_CMP_EN                  (0x090)
+#define LOCK_CMP1_MODE0              (0x098)
+#define LOCK_CMP2_MODE0              (0x09C)
+#define LOCK_CMP3_MODE0              (0x0A0)
+#define DEC_START_MODE0              (0x0B0)
+#define DIV_FRAC_START1_MODE0        (0x0B8)
+#define DIV_FRAC_START2_MODE0        (0x0BC)
+#define DIV_FRAC_START3_MODE0        (0x0C0)
+#define INTEGLOOP_GAIN0_MODE0        (0x0D8)
+#define INTEGLOOP_GAIN1_MODE0        (0x0DC)
+#define VCO_TUNE_CTRL                (0x0EC)
+#define VCO_TUNE_MAP                 (0x0F0)
+#define CLK_SELECT                   (0x138)
+#define HSCLK_SEL                    (0x13C)
+#define CORECLK_DIV_MODE0            (0x148)
+#define CORE_CLK_EN                  (0x154)
+#define C_READY_STATUS               (0x158)
+#define SVS_MODE_CLK_SEL             (0x164)
+
+/* Tx Channel PHY registers */
+#define PHY_TX_EMP_POST1_LVL(n)              ((((n) * 0x200) + 0x400) + 0x000)
+#define PHY_TX_INTERFACE_SELECT_TX_BAND(n)   ((((n) * 0x200) + 0x400) + 0x008)
+#define PHY_TX_CLKBUF_TERM_ENABLE(n)         ((((n) * 0x200) + 0x400) + 0x00C)
+#define PHY_TX_DRV_LVL_RES_CODE_OFFSET(n)    ((((n) * 0x200) + 0x400) + 0x014)
+#define PHY_TX_DRV_LVL(n)                    ((((n) * 0x200) + 0x400) + 0x018)
+#define PHY_TX_LANE_CONFIG(n)                ((((n) * 0x200) + 0x400) + 0x01C)
+#define PHY_TX_PRE_DRIVER_1(n)               ((((n) * 0x200) + 0x400) + 0x024)
+#define PHY_TX_PRE_DRIVER_2(n)               ((((n) * 0x200) + 0x400) + 0x028)
+#define PHY_TX_LANE_MODE(n)                  ((((n) * 0x200) + 0x400) + 0x02C)
+
+/* HDMI PHY registers */
+#define PHY_CFG                      (0x00)
+#define PHY_PD_CTL                   (0x04)
+#define PHY_MODE                     (0x10)
+#define PHY_CLOCK                    (0x5C)
+#define PHY_CMN_CTRL                 (0x68)
+#define PHY_STATUS                   (0xB4)
+
+#define HDMI_BIT_CLK_TO_PIX_CLK_RATIO		10
+#define HDMI_MHZ_TO_HZ				1000000
+#define HDMI_HZ_TO_MHZ				1000000
+#define HDMI_REF_CLOCK_MHZ			19.2
+#define HDMI_REF_CLOCK_HZ			(HDMI_REF_CLOCK_MHZ * 1000000)
+#define HDMI_VCO_MIN_RATE_HZ			25000000
+#define HDMI_VCO_MAX_RATE_HZ			600000000
+
+struct hdmi_8998_reg_cfg {
+	u32 tx_band;
+	u32 svs_mode_clk_sel;
+	u32 hsclk_sel;
+	u32 lock_cmp_en;
+	u32 cctrl_mode0;
+	u32 rctrl_mode0;
+	u32 cpctrl_mode0;
+	u32 dec_start_mode0;
+	u32 div_frac_start1_mode0;
+	u32 div_frac_start2_mode0;
+	u32 div_frac_start3_mode0;
+	u32 integloop_gain0_mode0;
+	u32 integloop_gain1_mode0;
+	u32 lock_cmp1_mode0;
+	u32 lock_cmp2_mode0;
+	u32 lock_cmp3_mode0;
+	u32 ssc_per1;
+	u32 ssc_per2;
+	u32 ssc_step_size1;
+	u32 ssc_step_size2;
+	u32 core_clk_en;
+	u32 coreclk_div_mode0;
+	u32 phy_mode;
+	u32 vco_freq;
+	u32 hsclk_divsel;
+	u32 vco_ratio;
+	u32 ssc_en_center;
+
+	u32 l0_tx_drv_lvl;
+	u32 l0_tx_emp_post1_lvl;
+	u32 l1_tx_drv_lvl;
+	u32 l1_tx_emp_post1_lvl;
+	u32 l2_tx_drv_lvl;
+	u32 l2_tx_emp_post1_lvl;
+	u32 l3_tx_drv_lvl;
+	u32 l3_tx_emp_post1_lvl;
+
+	u32 l0_pre_driver_1;
+	u32 l0_pre_driver_2;
+	u32 l1_pre_driver_1;
+	u32 l1_pre_driver_2;
+	u32 l2_pre_driver_1;
+	u32 l2_pre_driver_2;
+	u32 l3_pre_driver_1;
+	u32 l3_pre_driver_2;
+
+	bool debug;
+};
+
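+/*
+ * Divider search, in outline: every combination of VCO ratio
+ * {1..6, 9, 10, 12, 15, 25} and TX band {0..3} yields a candidate VCO
+ * frequency bit_clk * ratio * 2^band (held in MHz). Candidates survive only
+ * if they land in the 8000..12000 MHz VCO window and one of the two derived
+ * core-clock thresholds falls within [500, 1000]; the lowest surviving
+ * frequency wins. If nothing qualifies, the search is repeated in half-rate
+ * mode, and failing that, defaults are forced and an error is logged. The
+ * chosen ratio is finally translated to its HSCLK_DIVSEL register code.
+ */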
+static void hdmi_8998_get_div(struct hdmi_8998_reg_cfg *cfg,
+			      unsigned long pclk)
+{
+	u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
+				     9, 10, 12, 15, 25};
+	u32 const band_list[] = {0, 1, 2, 3};
+	u32 const sz_ratio = ARRAY_SIZE(ratio_list);
+	u32 const sz_band = ARRAY_SIZE(band_list);
+	u32 const min_freq = 8000, max_freq = 12000;
+	u32 const cmp_cnt = 1024;
+	u32 const th_min = 500, th_max = 1000;
+	u64 bit_clk = pclk * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+	u32 half_rate_mode = 0;
+	u32 freq_optimal, list_elements;
+	int optimal_index;
+	u32 i, j, k;
+	u32 freq_list[sz_ratio * sz_band];
+	u32 found_hsclk_divsel = 0, found_vco_ratio;
+	u32 found_tx_band_sel, found_vco_freq;
+
+find_optimal_index:
+	freq_optimal = max_freq;
+	optimal_index = -1;
+	list_elements = 0;
+
+	for (i = 0; i < sz_ratio; i++) {
+		for (j = 0; j < sz_band; j++) {
+			u64 freq = (bit_clk / (1 << half_rate_mode));
+
+			freq *= (ratio_list[i] * (1 << band_list[j]));
+			do_div(freq, (u64) HDMI_MHZ_TO_HZ);
+			freq_list[list_elements++] = freq;
+		}
+	}
+
+	for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
+		u32 const clks_pll_div = 2, core_clk_div = 5;
+		u32 const rng1 = 16, rng2 = 8;
+		u32 core_clk, rvar1;
+		u32 th1, th2;
+
+		core_clk = (((freq_list[k] /
+			      ratio_list[k / sz_band]) /
+			      clks_pll_div) / core_clk_div);
+
+		rvar1 = HDMI_REF_CLOCK_HZ / cmp_cnt;
+		rvar1 *= rng1;
+		rvar1 /= core_clk;
+
+		th1 = rvar1;
+
+		rvar1 = HDMI_REF_CLOCK_HZ / cmp_cnt;
+		rvar1 *= rng2;
+		rvar1 /= core_clk;
+
+		th2 = rvar1;
+
+		if (freq_list[k] >= min_freq &&
+				freq_list[k] <= max_freq) {
+			if ((th1 >= th_min && th1 <= th_max) ||
+					(th2 >= th_min && th2 <= th_max)) {
+				if (freq_list[k] <= freq_optimal) {
+					freq_optimal = freq_list[k];
+					optimal_index = k;
+				}
+			}
+		}
+	}
+
+	if (optimal_index == -1) {
+		if (!half_rate_mode) {
+			half_rate_mode = 1;
+			goto find_optimal_index;
+		} else {
+			/* set to default values */
+			found_vco_freq = max_freq;
+			found_hsclk_divsel = 0;
+			found_vco_ratio = 2;
+			found_tx_band_sel = 0;
+			pr_err("Config error for pclk %lu\n", pclk);
+		}
+	} else {
+		found_vco_ratio = ratio_list[optimal_index / sz_band];
+		found_tx_band_sel = band_list[optimal_index % sz_band];
+		found_vco_freq = freq_optimal;
+	}
+
+	switch (found_vco_ratio) {
+	case 1:
+		found_hsclk_divsel = 15;
+		break;
+	case 2:
+		found_hsclk_divsel = 0;
+		break;
+	case 3:
+		found_hsclk_divsel = 4;
+		break;
+	case 4:
+		found_hsclk_divsel = 8;
+		break;
+	case 5:
+		found_hsclk_divsel = 12;
+		break;
+	case 6:
+		found_hsclk_divsel = 1;
+		break;
+	case 9:
+		found_hsclk_divsel = 5;
+		break;
+	case 10:
+		found_hsclk_divsel = 2;
+		break;
+	case 12:
+		found_hsclk_divsel = 9;
+		break;
+	case 15:
+		found_hsclk_divsel = 13;
+		break;
+	case 25:
+		found_hsclk_divsel = 14;
+		break;
+	}
+
+	pr_debug("found_vco_freq=%d\n", found_vco_freq);
+	pr_debug("found_hsclk_divsel=%d\n", found_hsclk_divsel);
+	pr_debug("found_vco_ratio=%d\n", found_vco_ratio);
+	pr_debug("found_tx_band_sel=%d\n", found_tx_band_sel);
+	pr_debug("half_rate_mode=%d\n", half_rate_mode);
+	pr_debug("optimal_index=%d\n", optimal_index);
+
+	cfg->vco_freq = found_vco_freq;
+	cfg->hsclk_divsel = found_hsclk_divsel;
+	cfg->vco_ratio = found_vco_ratio;
+	cfg->tx_band = found_tx_band_sel;
+}
+
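+/*
+ * The fractional-N dividers below are derived as
+ *
+ *   dec_start      = floor(vco_freq / (4 * ref_clk))
+ *   div_frac_start = round((vco_freq / (4 * ref_clk) - dec_start) * 2^20)
+ *
+ * As an illustrative example (numbers chosen for the worked math, not taken
+ * from a particular display mode): for vco_freq = 1485 MHz and the 19.2 MHz
+ * reference, 1485 / 76.8 = 19.3359375, so dec_start = 19 (0x13) and
+ * div_frac_start = 0.3359375 * 2^20 = 0x56000, which is split across the
+ * DIV_FRAC_START1/2/3 byte registers as 0x00 / 0x60 / 0x05.
+ */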
+static int hdmi_8998_config_phy(unsigned long rate,
+		struct hdmi_8998_reg_cfg *cfg)
+{
+	u64 const high_freq_bit_clk_threshold = 3400000000UL;
+	u64 const dig_freq_bit_clk_threshold = 1500000000UL;
+	u64 const mid_freq_bit_clk_threshold = 750000000;
+	u64 fdata, tmds_clk;
+	u64 pll_div = 4 * HDMI_REF_CLOCK_HZ;
+	u64 bclk;
+	u64 vco_freq_hz;
+	u64 hsclk_sel, dec_start, div_frac_start;
+	u64 rem;
+	u64 cpctrl, rctrl, cctrl;
+	u64 integloop_gain;
+	u32 digclk_divsel;
+	u32 tmds_bclk_ratio;
+	u64 cmp_rng, cmp_cnt = 1024, pll_cmp;
+	bool gen_ssc = false;
+
+	bclk = rate * HDMI_BIT_CLK_TO_PIX_CLK_RATIO;
+
+	if (bclk > high_freq_bit_clk_threshold) {
+		tmds_clk = rate / 4;
+		tmds_bclk_ratio = 1;
+	} else {
+		tmds_clk = rate;
+		tmds_bclk_ratio = 0;
+	}
+
+	hdmi_8998_get_div(cfg, rate);
+
+	vco_freq_hz = cfg->vco_freq * (u64) HDMI_MHZ_TO_HZ;
+	fdata = cfg->vco_freq;
+	do_div(fdata, cfg->vco_ratio);
+
+	hsclk_sel = cfg->hsclk_divsel;
+	dec_start = vco_freq_hz;
+	do_div(dec_start, pll_div);
+
+	div_frac_start = vco_freq_hz * (1 << 20);
+	rem = do_div(div_frac_start, pll_div);
+	div_frac_start -= (dec_start * (1 << 20));
+	if (rem > (pll_div >> 1))
+		div_frac_start++;
+
+	if ((div_frac_start != 0) || gen_ssc) {
+		cpctrl = 0x8;
+		rctrl = 0x16;
+		cctrl = 0x34;
+	} else {
+		cpctrl = 0x30;
+		rctrl = 0x18;
+		cctrl = 0x2;
+	}
+
+	digclk_divsel = (bclk > dig_freq_bit_clk_threshold) ? 0x1 : 0x2;
+
+	integloop_gain = ((div_frac_start != 0) ||
+			gen_ssc) ? 0x3F : 0xC4;
+	integloop_gain <<= digclk_divsel;
+	integloop_gain = (integloop_gain <= 2046 ? integloop_gain : 0x7FE);
+
+	cmp_rng = gen_ssc ? 0x40 : 0x10;
+
+	pll_cmp = cmp_cnt * fdata;
+	rem = do_div(pll_cmp, (u64)(HDMI_REF_CLOCK_MHZ * 10));
+	if (rem > ((u64)(HDMI_REF_CLOCK_MHZ * 10) >> 1))
+		pll_cmp++;
+
+	pll_cmp =  pll_cmp - 1;
+
+	pr_debug("VCO_FREQ = %u\n", cfg->vco_freq);
+	pr_debug("FDATA = %llu\n", fdata);
+	pr_debug("DEC_START = %llu\n", dec_start);
+	pr_debug("DIV_FRAC_START = %llu\n", div_frac_start);
+	pr_debug("CPCTRL = %llu\n", cpctrl);
+	pr_debug("RCTRL = %llu\n", rctrl);
+	pr_debug("CCTRL = %llu\n", cctrl);
+	pr_debug("DIGCLK_DIVSEL = %u\n", digclk_divsel);
+	pr_debug("INTEGLOOP_GAIN = %llu\n", integloop_gain);
+	pr_debug("CMP_RNG = %llu\n", cmp_rng);
+	pr_debug("PLL_CMP = %llu\n", pll_cmp);
+
+	cfg->svs_mode_clk_sel = (digclk_divsel & 0xFF);
+	cfg->hsclk_sel = (0x20 | hsclk_sel);
+	cfg->lock_cmp_en = (gen_ssc ? 0x4 : 0x0);
+	cfg->cctrl_mode0 = (cctrl & 0xFF);
+	cfg->rctrl_mode0 = (rctrl & 0xFF);
+	cfg->cpctrl_mode0 = (cpctrl & 0xFF);
+	cfg->dec_start_mode0 = (dec_start & 0xFF);
+	cfg->div_frac_start1_mode0 = (div_frac_start & 0xFF);
+	cfg->div_frac_start2_mode0 = ((div_frac_start & 0xFF00) >> 8);
+	cfg->div_frac_start3_mode0 = ((div_frac_start & 0xF0000) >> 16);
+	cfg->integloop_gain0_mode0 = (integloop_gain & 0xFF);
+	cfg->integloop_gain1_mode0 = (integloop_gain & 0xF00) >> 8;
+	cfg->lock_cmp1_mode0 = (pll_cmp & 0xFF);
+	cfg->lock_cmp2_mode0 = ((pll_cmp & 0xFF00) >> 8);
+	cfg->lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+	cfg->ssc_per1 = 0;
+	cfg->ssc_per2 = 0;
+	cfg->ssc_step_size1 = 0;
+	cfg->ssc_step_size2 = 0;
+	cfg->core_clk_en = 0x2C;
+	cfg->coreclk_div_mode0 = 0x5;
+	cfg->phy_mode = (tmds_bclk_ratio ? 0x5 : 0x4);
+	cfg->ssc_en_center = 0x0;
+
+	if (bclk > high_freq_bit_clk_threshold) {
+		cfg->l0_tx_drv_lvl = 0xA;
+		cfg->l0_tx_emp_post1_lvl = 0x3;
+		cfg->l1_tx_drv_lvl = 0xA;
+		cfg->l1_tx_emp_post1_lvl = 0x3;
+		cfg->l2_tx_drv_lvl = 0xA;
+		cfg->l2_tx_emp_post1_lvl = 0x3;
+		cfg->l3_tx_drv_lvl = 0x8;
+		cfg->l3_tx_emp_post1_lvl = 0x3;
+		cfg->l0_pre_driver_1 = 0x0;
+		cfg->l0_pre_driver_2 = 0x1C;
+		cfg->l1_pre_driver_1 = 0x0;
+		cfg->l1_pre_driver_2 = 0x1C;
+		cfg->l2_pre_driver_1 = 0x0;
+		cfg->l2_pre_driver_2 = 0x1C;
+		cfg->l3_pre_driver_1 = 0x0;
+		cfg->l3_pre_driver_2 = 0x0;
+	} else if (bclk > dig_freq_bit_clk_threshold) {
+		cfg->l0_tx_drv_lvl = 0x9;
+		cfg->l0_tx_emp_post1_lvl = 0x3;
+		cfg->l1_tx_drv_lvl = 0x9;
+		cfg->l1_tx_emp_post1_lvl = 0x3;
+		cfg->l2_tx_drv_lvl = 0x9;
+		cfg->l2_tx_emp_post1_lvl = 0x3;
+		cfg->l3_tx_drv_lvl = 0x8;
+		cfg->l3_tx_emp_post1_lvl = 0x3;
+		cfg->l0_pre_driver_1 = 0x0;
+		cfg->l0_pre_driver_2 = 0x16;
+		cfg->l1_pre_driver_1 = 0x0;
+		cfg->l1_pre_driver_2 = 0x16;
+		cfg->l2_pre_driver_1 = 0x0;
+		cfg->l2_pre_driver_2 = 0x16;
+		cfg->l3_pre_driver_1 = 0x0;
+		cfg->l3_pre_driver_2 = 0x0;
+	} else if (bclk > mid_freq_bit_clk_threshold) {
+		cfg->l0_tx_drv_lvl = 0x9;
+		cfg->l0_tx_emp_post1_lvl = 0x3;
+		cfg->l1_tx_drv_lvl = 0x9;
+		cfg->l1_tx_emp_post1_lvl = 0x3;
+		cfg->l2_tx_drv_lvl = 0x9;
+		cfg->l2_tx_emp_post1_lvl = 0x3;
+		cfg->l3_tx_drv_lvl = 0x8;
+		cfg->l3_tx_emp_post1_lvl = 0x3;
+		cfg->l0_pre_driver_1 = 0x0;
+		cfg->l0_pre_driver_2 = 0x0E;
+		cfg->l1_pre_driver_1 = 0x0;
+		cfg->l1_pre_driver_2 = 0x0E;
+		cfg->l2_pre_driver_1 = 0x0;
+		cfg->l2_pre_driver_2 = 0x0E;
+		cfg->l3_pre_driver_1 = 0x0;
+		cfg->l3_pre_driver_2 = 0x0;
+	} else {
+		cfg->l0_tx_drv_lvl = 0x0;
+		cfg->l0_tx_emp_post1_lvl = 0x0;
+		cfg->l1_tx_drv_lvl = 0x0;
+		cfg->l1_tx_emp_post1_lvl = 0x0;
+		cfg->l2_tx_drv_lvl = 0x0;
+		cfg->l2_tx_emp_post1_lvl = 0x0;
+		cfg->l3_tx_drv_lvl = 0x0;
+		cfg->l3_tx_emp_post1_lvl = 0x0;
+		cfg->l0_pre_driver_1 = 0x0;
+		cfg->l0_pre_driver_2 = 0x01;
+		cfg->l1_pre_driver_1 = 0x0;
+		cfg->l1_pre_driver_2 = 0x01;
+		cfg->l2_pre_driver_1 = 0x0;
+		cfg->l2_pre_driver_2 = 0x01;
+		cfg->l3_pre_driver_1 = 0x0;
+		cfg->l3_pre_driver_2 = 0x0;
+	}
+
+	return 0;
+}
+
+static int hdmi_8998_pll_set_clk_rate(struct clk *c, unsigned long rate)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	struct hdmi_8998_reg_cfg cfg = {0};
+	void __iomem *phy = io->phy_base, *pll = io->pll_base;
+
+	rc = hdmi_8998_config_phy(rate, &cfg);
+	if (rc) {
+		pr_err("rate calculation failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	_W(phy, PHY_PD_CTL, 0x0);
+	udelay(500);
+
+	_W(phy, PHY_PD_CTL, 0x1);
+	_W(pll, RESETSM_CNTRL, 0x20);
+	_W(phy, PHY_CMN_CTRL, 0x6);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(0), cfg.tx_band);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(1), cfg.tx_band);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(2), cfg.tx_band);
+	_W(pll, PHY_TX_INTERFACE_SELECT_TX_BAND(3), cfg.tx_band);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(0), 0x1);
+	_W(pll, PHY_TX_LANE_MODE(0), 0x20);
+	_W(pll, PHY_TX_LANE_MODE(1), 0x20);
+	_W(pll, PHY_TX_LANE_MODE(2), 0x20);
+	_W(pll, PHY_TX_LANE_MODE(3), 0x20);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(1), 0x1);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(2), 0x1);
+	_W(pll, PHY_TX_CLKBUF_TERM_ENABLE(3), 0x1);
+	_W(pll, SYSCLK_BUF_ENABLE, 0x2);
+	_W(pll, BIAS_EN_CLKBUFLR_EN, 0xB);
+	_W(pll, SYSCLK_EN_SEL, 0x37);
+	_W(pll, SYS_CLK_CTRL, 0x2);
+	_W(pll, CLK_ENABLE1, 0xE);
+	_W(pll, PLL_IVCO, 0xF);
+	_W(pll, VCO_TUNE_CTRL, 0x0);
+	_W(pll, SVS_MODE_CLK_SEL, cfg.svs_mode_clk_sel);
+	_W(pll, CLK_SELECT, 0x30);
+	_W(pll, HSCLK_SEL, cfg.hsclk_sel);
+	_W(pll, LOCK_CMP_EN, cfg.lock_cmp_en);
+	_W(pll, PLL_CCTRL_MODE0, cfg.cctrl_mode0);
+	_W(pll, PLL_RCTRL_MODE0, cfg.rctrl_mode0);
+	_W(pll, CP_CTRL_MODE0, cfg.cpctrl_mode0);
+	_W(pll, DEC_START_MODE0, cfg.dec_start_mode0);
+	_W(pll, DIV_FRAC_START1_MODE0, cfg.div_frac_start1_mode0);
+	_W(pll, DIV_FRAC_START2_MODE0, cfg.div_frac_start2_mode0);
+	_W(pll, DIV_FRAC_START3_MODE0, cfg.div_frac_start3_mode0);
+	_W(pll, INTEGLOOP_GAIN0_MODE0, cfg.integloop_gain0_mode0);
+	_W(pll, INTEGLOOP_GAIN1_MODE0, cfg.integloop_gain1_mode0);
+	_W(pll, LOCK_CMP1_MODE0, cfg.lock_cmp1_mode0);
+	_W(pll, LOCK_CMP2_MODE0, cfg.lock_cmp2_mode0);
+	_W(pll, LOCK_CMP3_MODE0, cfg.lock_cmp3_mode0);
+	_W(pll, VCO_TUNE_MAP, 0x0);
+	_W(pll, CORE_CLK_EN, cfg.core_clk_en);
+	_W(pll, CORECLK_DIV_MODE0, cfg.coreclk_div_mode0);
+
+	_W(pll, PHY_TX_DRV_LVL(0), cfg.l0_tx_drv_lvl);
+	_W(pll, PHY_TX_DRV_LVL(1), cfg.l1_tx_drv_lvl);
+	_W(pll, PHY_TX_DRV_LVL(2), cfg.l2_tx_drv_lvl);
+	_W(pll, PHY_TX_DRV_LVL(3), cfg.l3_tx_drv_lvl);
+
+	_W(pll, PHY_TX_EMP_POST1_LVL(0), cfg.l0_tx_emp_post1_lvl);
+	_W(pll, PHY_TX_EMP_POST1_LVL(1), cfg.l1_tx_emp_post1_lvl);
+	_W(pll, PHY_TX_EMP_POST1_LVL(2), cfg.l2_tx_emp_post1_lvl);
+	_W(pll, PHY_TX_EMP_POST1_LVL(3), cfg.l3_tx_emp_post1_lvl);
+
+	_W(pll, PHY_TX_PRE_DRIVER_1(0), cfg.l0_pre_driver_1);
+	_W(pll, PHY_TX_PRE_DRIVER_1(1), cfg.l1_pre_driver_1);
+	_W(pll, PHY_TX_PRE_DRIVER_1(2), cfg.l2_pre_driver_1);
+	_W(pll, PHY_TX_PRE_DRIVER_1(3), cfg.l3_pre_driver_1);
+
+	_W(pll, PHY_TX_PRE_DRIVER_2(0), cfg.l0_pre_driver_2);
+	_W(pll, PHY_TX_PRE_DRIVER_2(1), cfg.l1_pre_driver_2);
+	_W(pll, PHY_TX_PRE_DRIVER_2(2), cfg.l2_pre_driver_2);
+	_W(pll, PHY_TX_PRE_DRIVER_2(3), cfg.l3_pre_driver_2);
+
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(0), 0x0);
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(1), 0x0);
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(2), 0x0);
+	_W(pll, PHY_TX_DRV_LVL_RES_CODE_OFFSET(3), 0x0);
+
+	_W(phy, PHY_MODE, cfg.phy_mode);
+
+	_W(pll, PHY_TX_LANE_CONFIG(0), 0x10);
+	_W(pll, PHY_TX_LANE_CONFIG(1), 0x10);
+	_W(pll, PHY_TX_LANE_CONFIG(2), 0x10);
+	_W(pll, PHY_TX_LANE_CONFIG(3), 0x10);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	return 0;
+}
+
+static int hdmi_8998_pll_lock_status(struct mdss_pll_resources *io)
+{
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+	u32 status;
+	int rc = 0;
+	void __iomem *pll = io->pll_base;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+	rc = readl_poll_timeout_atomic(pll + C_READY_STATUS,
+			status,
+			((status & BIT(0)) > 0),
+			delay_us,
+			timeout_us);
+	if (rc)
+		pr_err("HDMI PLL(%d) lock failed, status=0x%08x\n",
+				io->index, status);
+	else
+		pr_debug("HDMI PLL(%d) lock passed, status=0x%08x\n",
+				io->index, status);
+
+	mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static int hdmi_8998_phy_ready_status(struct mdss_pll_resources *io)
+{
+	u32 const delay_us = 100;
+	u32 const timeout_us = 5000;
+	u32 status;
+	int rc = 0;
+	void __iomem *phy = io->phy_base;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource can't be enabled\n");
+		return rc;
+	}
+
+	rc = readl_poll_timeout_atomic(phy + PHY_STATUS,
+			status,
+			((status & BIT(0)) > 0),
+			delay_us,
+			timeout_us);
+	if (rc)
+		pr_err("HDMI PHY(%d) not ready, status=0x%08x\n",
+				io->index, status);
+	else
+		pr_debug("HDMI PHY(%d) ready, status=0x%08x\n",
+				io->index, status);
+
+	mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static int hdmi_8998_vco_set_rate(struct clk *c, unsigned long rate)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource enable failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (io->pll_on)
+		goto error;
+
+	rc = hdmi_8998_pll_set_clk_rate(c, rate);
+	if (rc) {
+		pr_err("failed to set clk rate, rc=%d\n", rc);
+		goto error;
+	}
+
+	vco->rate = rate;
+	vco->rate_set = true;
+
+error:
+	(void)mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static long hdmi_8998_vco_round_rate(struct clk *c, unsigned long rate)
+{
+	unsigned long rrate = rate;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	return rrate;
+}
+
+static int hdmi_8998_pll_enable(struct clk *c)
+{
+	int rc = 0;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *phy = io->phy_base, *pll = io->pll_base;
+
+	_W(phy, PHY_CFG, 0x1);
+	udelay(100);
+	_W(phy, PHY_CFG, 0x59);
+	udelay(100);
+
+	_W(phy, PHY_CLOCK, 0x6);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	rc = hdmi_8998_pll_lock_status(io);
+	if (rc) {
+		pr_err("PLL not locked, rc=%d\n", rc);
+		return rc;
+	}
+
+	_W(pll, PHY_TX_LANE_CONFIG(0), 0x1F);
+	_W(pll, PHY_TX_LANE_CONFIG(1), 0x1F);
+	_W(pll, PHY_TX_LANE_CONFIG(2), 0x1F);
+	_W(pll, PHY_TX_LANE_CONFIG(3), 0x1F);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	rc = hdmi_8998_phy_ready_status(io);
+	if (rc) {
+		pr_err("PHY NOT READY, rc=%d\n", rc);
+		return rc;
+	}
+
+	_W(phy, PHY_CFG, 0x58);
+	udelay(1);
+	_W(phy, PHY_CFG, 0x59);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	io->pll_on = true;
+	return rc;
+}
+
+static int hdmi_8998_vco_prepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+	int rc = 0;
+
+	if (!io) {
+		pr_err("hdmi pll resources are not available\n");
+		return -EINVAL;
+	}
+
+	rc = mdss_pll_resource_enable(io, true);
+	if (rc) {
+		pr_err("pll resource enable failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	if (!vco->rate_set && vco->rate) {
+		rc = hdmi_8998_pll_set_clk_rate(c, vco->rate);
+		if (rc) {
+			pr_err("set rate failed, rc=%d\n", rc);
+			goto error;
+		}
+	}
+
+	rc = hdmi_8998_pll_enable(c);
+	if (rc)
+		pr_err("pll enable failed, rc=%d\n", rc);
+
+error:
+	if (rc)
+		mdss_pll_resource_enable(io, false);
+
+	return rc;
+}
+
+static void hdmi_8998_pll_disable(struct hdmi_pll_vco_clk *vco)
+{
+	struct mdss_pll_resources *io = vco->priv;
+	void __iomem *phy = io->phy_base;
+
+	if (!io->pll_on)
+		return;
+
+	_W(phy, PHY_PD_CTL, 0x0);
+
+	/* Ensure all registers are flushed to hardware */
+	wmb();
+
+	vco->rate_set = false;
+	io->handoff_resources = false;
+	io->pll_on = false;
+}
+
+static void hdmi_8998_vco_unprepare(struct clk *c)
+{
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (!io) {
+		pr_err("HDMI pll resources not available\n");
+		return;
+	}
+
+	hdmi_8998_pll_disable(vco);
+	mdss_pll_resource_enable(io, false);
+}
+
+static enum handoff hdmi_8998_vco_handoff(struct clk *c)
+{
+	enum handoff ret = HANDOFF_DISABLED_CLK;
+	struct hdmi_pll_vco_clk *vco = to_hdmi_vco_clk(c);
+	struct mdss_pll_resources *io = vco->priv;
+
+	if (mdss_pll_resource_enable(io, true)) {
+		pr_err("pll resource can't be enabled\n");
+		return ret;
+	}
+
+	io->handoff_resources = true;
+
+	if (_R(io->pll_base, C_READY_STATUS) & BIT(0) &&
+			_R(io->phy_base, PHY_STATUS) & BIT(0)) {
+		io->pll_on = true;
+		/* TODO: calculate rate based on the phy/pll register values. */
+		ret = HANDOFF_ENABLED_CLK;
+	} else {
+		io->handoff_resources = false;
+		mdss_pll_resource_enable(io, false);
+		pr_debug("PHY/PLL not ready\n");
+	}
+
+	pr_debug("done, ret=%d\n", ret);
+	return ret;
+}
+
+static const struct clk_ops hdmi_8998_vco_clk_ops = {
+	.set_rate = hdmi_8998_vco_set_rate,
+	.round_rate = hdmi_8998_vco_round_rate,
+	.prepare = hdmi_8998_vco_prepare,
+	.unprepare = hdmi_8998_vco_unprepare,
+	.handoff = hdmi_8998_vco_handoff,
+};
+
+static struct hdmi_pll_vco_clk hdmi_vco_clk = {
+	.min_rate = HDMI_VCO_MIN_RATE_HZ,
+	.max_rate = HDMI_VCO_MAX_RATE_HZ,
+	.c = {
+		.dbg_name = "hdmi_8998_vco_clk",
+		.ops = &hdmi_8998_vco_clk_ops,
+		CLK_INIT(hdmi_vco_clk.c),
+	},
+};
+
+static struct clk_lookup hdmipllcc_8998[] = {
+	CLK_LIST(hdmi_vco_clk),
+};
+
+int hdmi_8998_pll_clock_register(struct platform_device *pdev,
+				   struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+
+	hdmi_vco_clk.priv = pll_res;
+
+	rc = of_msm_clock_register(pdev->dev.of_node, hdmipllcc_8998,
+				   ARRAY_SIZE(hdmipllcc_8998));
+	if (rc) {
+		pr_err("clock register failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-hdmi-pll.h b/drivers/clk/qcom/mdss/mdss-hdmi-pll.h
new file mode 100644
index 0000000..bba389b
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-hdmi-pll.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_HDMI_PLL_H
+#define __MDSS_HDMI_PLL_H
+
+struct hdmi_pll_cfg {
+	unsigned long vco_rate;
+	u32 reg;
+};
+
+struct hdmi_pll_vco_clk {
+	unsigned long	rate;	/* current vco rate */
+	unsigned long	min_rate;	/* min vco rate */
+	unsigned long	max_rate;	/* max vco rate */
+	bool		rate_set;
+	struct hdmi_pll_cfg *ip_seti;
+	struct hdmi_pll_cfg *cp_seti;
+	struct hdmi_pll_cfg *ip_setp;
+	struct hdmi_pll_cfg *cp_setp;
+	struct hdmi_pll_cfg *crctrl;
+	void		*priv;
+
+	struct clk	c;
+};
+
+static inline struct hdmi_pll_vco_clk *to_hdmi_vco_clk(struct clk *clk)
+{
+	return container_of(clk, struct hdmi_pll_vco_clk, c);
+}
+
+int hdmi_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_20nm_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v1_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v2_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res);
+
+int hdmi_8996_v3_1p8_pll_clock_register(struct platform_device *pdev,
+				 struct mdss_pll_resources *pll_res);
+
+int hdmi_8998_pll_clock_register(struct platform_device *pdev,
+				   struct mdss_pll_resources *pll_res);
+#endif
diff --git a/drivers/clk/qcom/mdss/mdss-pll-util.c b/drivers/clk/qcom/mdss/mdss-pll-util.c
new file mode 100644
index 0000000..5ed966b
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-pll-util.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/memblock.h>
+
+#include "mdss-pll.h"
+
+/**
+ * mdss_pll_get_mp_by_reg_name() -- Find power module by regulator name
+ *@pll_res: Pointer to the PLL resource
+ *@name: Regulator name as specified in the pll dtsi
+ *
+ * This is a helper function to retrieve the regulator information
+ * for each pll resource.
+ */
+struct dss_vreg *mdss_pll_get_mp_by_reg_name(
+		struct mdss_pll_resources *pll_res, char *name)
+{
+	struct dss_vreg *regulator = NULL;
+	int i;
+
+	if ((pll_res == NULL) || (pll_res->mp.vreg_config == NULL)) {
+		pr_err("Invalid PLL resource\n");
+		goto error;
+	}
+
+	regulator = pll_res->mp.vreg_config;
+
+	for (i = 0; i < pll_res->mp.num_vreg; i++) {
+		if (!strcmp(name, regulator->vreg_name)) {
+			pr_debug("Found regulator match for %s\n", name);
+			return regulator;
+		}
+		regulator++;
+	}
+
+	/* No match: return NULL rather than a past-the-end pointer */
+	pr_err("regulator %s not found\n", name);
+	regulator = NULL;
+
+error:
+	return regulator;
+}
+
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+								bool enable)
+{
+	int rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	if (enable) {
+		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+		if (rc) {
+			pr_err("Failed to enable vregs rc=%d\n", rc);
+			goto vreg_err;
+		}
+
+		rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+		if (rc) {
+			pr_err("Failed to set clock rate rc=%d\n", rc);
+			goto clk_err;
+		}
+
+		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+		if (rc) {
+			pr_err("clock enable failed rc:%d\n", rc);
+			goto clk_err;
+		}
+	} else {
+		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+
+		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
+	}
+
+	return rc;
+
+clk_err:
+	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	return rc;
+}
+
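+/*
+ * The parser below expects a child-node layout along these lines (property
+ * names are the ones read by the code; node names and values are
+ * illustrative only):
+ *
+ *	qcom,platform-supply-entries {
+ *		qcom,platform-supply-entry@0 {
+ *			qcom,supply-name = "vdda";
+ *			qcom,supply-min-voltage = <1200000>;
+ *			qcom,supply-max-voltage = <1200000>;
+ *			qcom,supply-enable-load = <100000>;
+ *			qcom,supply-disable-load = <100>;
+ *		};
+ *	};
+ *
+ * The four qcom,supply-*-sleep properties are optional and default to 0.
+ */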
+static int mdss_pll_util_parse_dt_supply(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int i = 0, rc = 0;
+	u32 tmp = 0;
+	struct device_node *of_node = NULL, *supply_root_node = NULL;
+	struct device_node *supply_node = NULL;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	of_node = pdev->dev.of_node;
+
+	mp->num_vreg = 0;
+	supply_root_node = of_get_child_by_name(of_node,
+						"qcom,platform-supply-entries");
+	if (!supply_root_node) {
+		pr_err("no supply entry present\n");
+		return rc;
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node) {
+		mp->num_vreg++;
+	}
+
+	if (mp->num_vreg == 0) {
+		pr_debug("no vreg\n");
+		return rc;
+	}
+	pr_debug("vreg found. count=%d\n", mp->num_vreg);
+
+	mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
+						mp->num_vreg, GFP_KERNEL);
+	if (!mp->vreg_config) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	for_each_child_of_node(supply_root_node, supply_node) {
+		const char *st = NULL;
+
+		rc = of_property_read_string(supply_node,
+						"qcom,supply-name", &st);
+		if (rc) {
+			pr_err("error reading supply name. rc=%d\n", rc);
+			goto error;
+		}
+
+		strlcpy(mp->vreg_config[i].vreg_name, st,
+					sizeof(mp->vreg_config[i].vreg_name));
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-min-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading min volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-max-voltage", &tmp);
+		if (rc) {
+			pr_err("error reading max volt. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-enable-load", &tmp);
+		if (rc) {
+			pr_err("error reading enable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].enable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-disable-load", &tmp);
+		if (rc) {
+			pr_err("error reading disable load. rc=%d\n", rc);
+			goto error;
+		}
+		mp->vreg_config[i].disable_load = tmp;
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre on sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-pre-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply pre off sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-on-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post on sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);
+
+		rc = of_property_read_u32(supply_node,
+					"qcom,supply-post-off-sleep", &tmp);
+		if (rc)
+			pr_debug("error reading supply post off sleep value. rc=%d\n",
+							rc);
+
+		mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);
+
+		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
+					mp->vreg_config[i].vreg_name,
+					mp->vreg_config[i].min_voltage,
+					mp->vreg_config[i].max_voltage,
+					mp->vreg_config[i].enable_load,
+					mp->vreg_config[i].disable_load,
+					mp->vreg_config[i].pre_on_sleep,
+					mp->vreg_config[i].post_on_sleep,
+					mp->vreg_config[i].pre_off_sleep,
+					mp->vreg_config[i].post_off_sleep);
+		++i;
+
+		rc = 0;
+	}
+
+	return rc;
+
+error:
+	if (mp->vreg_config) {
+		devm_kfree(&pdev->dev, mp->vreg_config);
+		mp->vreg_config = NULL;
+		mp->num_vreg = 0;
+	}
+
+	return rc;
+}
+
+static int mdss_pll_util_parse_dt_clock(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	u32 i = 0;
+	int rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+	const char *clock_name;
+	u32 clock_rate;
+
+	mp->num_clk = of_property_count_strings(pdev->dev.of_node,
+							"clock-names");
+	if (mp->num_clk <= 0) {
+		pr_err("clocks are not defined\n");
+		goto clk_err;
+	}
+
+	mp->clk_config = devm_kzalloc(&pdev->dev,
+			sizeof(struct dss_clk) * mp->num_clk, GFP_KERNEL);
+	if (!mp->clk_config) {
+		rc = -ENOMEM;
+		mp->num_clk = 0;
+		goto clk_err;
+	}
+
+	for (i = 0; i < mp->num_clk; i++) {
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+							i, &clock_name);
+		strlcpy(mp->clk_config[i].clk_name, clock_name,
+				sizeof(mp->clk_config[i].clk_name));
+
+		of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+							i, &clock_rate);
+		mp->clk_config[i].rate = clock_rate;
+
+		if (!clock_rate)
+			mp->clk_config[i].type = DSS_CLK_AHB;
+		else
+			mp->clk_config[i].type = DSS_CLK_PCLK;
+	}
+
+clk_err:
+	return rc;
+}
+
+static void mdss_pll_free_bootmem(u32 mem_addr, u32 size)
+{
+	unsigned long pfn_start, pfn_end, pfn_idx;
+
+	pfn_start = mem_addr >> PAGE_SHIFT;
+	pfn_end = (mem_addr + size) >> PAGE_SHIFT;
+	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+		free_reserved_page(pfn_to_page(pfn_idx));
+}
+
+static int mdss_pll_util_parse_dt_dfps(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct device_node *pnode;
+	const u32 *addr;
+	struct vm_struct *area;
+	u64 size;
+	u32 offsets[2];
+	unsigned long virt_add;
+
+	pnode = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+	if (!pnode) {
+		rc = -ENODEV;
+		goto pnode_err;
+	}
+
+	addr = of_get_address(pnode, 0, &size, NULL);
+	if (!addr) {
+		pr_err("failed to parse the dfps memory address\n");
+		rc = -EINVAL;
+		goto pnode_err;
+	}
+	/* maintain compatibility for 32/64 bit */
+	offsets[0] = (u32) of_read_ulong(addr, 2);
+	offsets[1] = (u32) size;
+
+	area = get_vm_area(offsets[1], VM_IOREMAP);
+	if (!area) {
+		rc = -ENOMEM;
+		goto dfps_mem_err;
+	}
+
+	virt_add = (unsigned long)area->addr;
+	rc = ioremap_page_range(virt_add, (virt_add + offsets[1]),
+			offsets[0], PAGE_KERNEL);
+	if (rc) {
+		rc = -ENOMEM;
+		goto ioremap_err;
+	}
+
+	pll_res->dfps = kzalloc(sizeof(struct dfps_info), GFP_KERNEL);
+	if (!pll_res->dfps) {
+		rc = -ENOMEM;
+		pr_err("couldn't allocate dfps kernel memory\n");
+		goto addr_err;
+	}
+
+	/* memcopy complete dfps structure from kernel virtual memory */
+	memcpy_fromio(pll_res->dfps, area->addr, sizeof(struct dfps_info));
+
+addr_err:
+	if (virt_add)
+		unmap_kernel_range(virt_add, (unsigned long) size);
+ioremap_err:
+	if (area)
+		vfree(area->addr);
+dfps_mem_err:
+	/* free the dfps memory here */
+	memblock_free(offsets[0], offsets[1]);
+	mdss_pll_free_bootmem(offsets[0], offsets[1]);
+pnode_err:
+	if (pnode)
+		of_node_put(pnode);
+
+	dma_release_declared_memory(&pdev->dev);
+	return rc;
+}
+
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	rc = mdss_pll_util_parse_dt_supply(pdev, pll_res);
+	if (rc) {
+		pr_err("vreg parsing failed rc=%d\n", rc);
+		goto end;
+	}
+
+	rc = mdss_pll_util_parse_dt_clock(pdev, pll_res);
+	if (rc) {
+		pr_err("clock name parsing failed rc=%d\n", rc);
+		goto clk_err;
+	}
+
+	if (mdss_pll_util_parse_dt_dfps(pdev, pll_res))
+		pr_err("dfps not enabled!\n");
+
+	return rc;
+
+clk_err:
+	devm_kfree(&pdev->dev, mp->vreg_config);
+	mp->num_vreg = 0;
+end:
+	return rc;
+}
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
new file mode 100644
index 0000000..81366d8
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)	"%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include "mdss-pll.h"
+#include "mdss-dsi-pll.h"
+#include "mdss-dp-pll.h"
+
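+/*
+ * Resource refcounting: enable calls bump resource_ref_cnt and only the
+ * 0 -> 1 transition actually powers the regulators and clocks on; disable
+ * calls drop it and only the 1 -> 0 transition powers them off. Callers are
+ * expected to balance every enable(true) with an enable(false) around their
+ * register accesses. During handoff the function deliberately refuses to
+ * change state.
+ */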
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
+{
+	int rc = 0;
+	int changed = 0;
+
+	if (!pll_res) {
+		pr_err("Invalid input parameters\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Don't turn off resources during handoff or add more than
+	 * 1 refcount.
+	 */
+	if (pll_res->handoff_resources &&
+		(!enable || (enable & pll_res->resource_enable))) {
+		pr_debug("Do not turn on/off pll resources during handoff case\n");
+		return rc;
+	}
+
+	if (enable) {
+		if (pll_res->resource_ref_cnt == 0)
+			changed++;
+		pll_res->resource_ref_cnt++;
+	} else {
+		if (pll_res->resource_ref_cnt) {
+			pll_res->resource_ref_cnt--;
+			if (pll_res->resource_ref_cnt == 0)
+				changed++;
+		} else {
+			pr_err("PLL Resources already OFF\n");
+		}
+	}
+
+	if (changed) {
+		rc = mdss_pll_util_resource_enable(pll_res, enable);
+		if (rc)
+			pr_err("Resource update failed rc=%d\n", rc);
+		else
+			pll_res->resource_enable = enable;
+	}
+
+	return rc;
+}
+
+static int mdss_pll_resource_init(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	struct dss_module_power *mp = &pll_res->mp;
+
+	rc = msm_dss_config_vreg(&pdev->dev,
+				mp->vreg_config, mp->num_vreg, 1);
+	if (rc) {
+		pr_err("Vreg config failed rc=%d\n", rc);
+		goto vreg_err;
+	}
+
+	rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+	if (rc) {
+		pr_err("Clock get failed rc=%d\n", rc);
+		goto clk_err;
+	}
+
+	return rc;
+
+clk_err:
+	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+	return rc;
+}
+
+static void mdss_pll_resource_deinit(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	struct dss_module_power *mp = &pll_res->mp;
+
+	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+
+	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+}
+
+static void mdss_pll_resource_release(struct platform_device *pdev,
+					struct mdss_pll_resources *pll_res)
+{
+	struct dss_module_power *mp = &pll_res->mp;
+
+	mp->num_vreg = 0;
+	mp->num_clk = 0;
+}
+
+static int mdss_pll_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc = 0;
+	const char *compatible_stream;
+
+	rc = mdss_pll_util_resource_parse(pdev, pll_res);
+	if (rc) {
+		pr_err("Failed to parse the resources rc=%d\n", rc);
+		goto end;
+	}
+
+	compatible_stream = of_get_property(pdev->dev.of_node,
+				"compatible", NULL);
+	if (!compatible_stream) {
+		pr_err("Failed to parse the compatible stream\n");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_10nm"))
+		pll_res->pll_interface_type = MDSS_DP_PLL_10NM;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_7nm"))
+		pll_res->pll_interface_type = MDSS_DP_PLL_7NM;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_7nm"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_7NM;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_28lpm"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_28LPM;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_14nm"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_14NM;
+	else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_14nm"))
+		pll_res->pll_interface_type = MDSS_DP_PLL_14NM;
+	else {
+		pr_err("unsupported compatible stream %s\n", compatible_stream);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	return rc;
+
+err:
+	mdss_pll_resource_release(pdev, pll_res);
+end:
+	return rc;
+}
+
+static int mdss_pll_clock_register(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res)
+{
+	int rc;
+
+	switch (pll_res->pll_interface_type) {
+	case MDSS_DSI_PLL_10NM:
+		rc = dsi_pll_clock_register_10nm(pdev, pll_res);
+		break;
+	case MDSS_DP_PLL_10NM:
+		rc = dp_pll_clock_register_10nm(pdev, pll_res);
+		break;
+	case MDSS_DSI_PLL_7NM:
+		rc = dsi_pll_clock_register_7nm(pdev, pll_res);
+		break;
+	case MDSS_DP_PLL_7NM:
+		rc = dp_pll_clock_register_7nm(pdev, pll_res);
+		break;
+	case MDSS_DSI_PLL_28LPM:
+		rc = dsi_pll_clock_register_28lpm(pdev, pll_res);
+		break;
+	case MDSS_DSI_PLL_14NM:
+		rc = dsi_pll_clock_register_14nm(pdev, pll_res);
+		break;
+	case MDSS_DP_PLL_14NM:
+		rc = dp_pll_clock_register_14nm(pdev, pll_res);
+		break;
+	case MDSS_UNKNOWN_PLL:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc)
+		pr_err("Pll ndx=%d clock register failed rc=%d\n",
+				pll_res->index, rc);
+
+	return rc;
+}
+
+static inline int mdss_pll_get_ioresources(struct platform_device *pdev,
+				void __iomem **regmap, char *resource_name)
+{
+	int rc = 0;
+	struct resource *rsc = platform_get_resource_byname(pdev,
+						IORESOURCE_MEM, resource_name);
+	/* some register blocks are optional; map only those present */
+	if (rsc) {
+		*regmap = devm_ioremap(&pdev->dev,
+					rsc->start, resource_size(rsc));
+		if (!*regmap)
+			return -ENOMEM;
+	}
+	return rc;
+}
+
+static int mdss_pll_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	const char *label;
+	struct mdss_pll_resources *pll_res;
+
+	if (!pdev->dev.of_node) {
+		pr_err("MDSS pll driver only supports device tree probe\n");
+		return -ENOTSUPP;
+	}
+
+	label = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!label)
+		pr_info("%d: MDSS pll label not specified\n");
+	else
+		pr_info("MDSS pll label = %s\n", label);
+
+	pll_res = devm_kzalloc(&pdev->dev, sizeof(struct mdss_pll_resources),
+								GFP_KERNEL);
+	if (!pll_res)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pll_res);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
+			&pll_res->index);
+	if (rc) {
+		pr_err("Unable to get the cell-index rc=%d\n", rc);
+		pll_res->index = 0;
+	}
+
+	pll_res->ssc_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-pll-ssc-en");
+
+	if (pll_res->ssc_en) {
+		pr_info("%s: label=%s PLL SSC enabled\n", __func__, label);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ssc-frequency-hz", &pll_res->ssc_freq);
+
+		rc = of_property_read_u32(pdev->dev.of_node,
+			"qcom,ssc-ppm", &pll_res->ssc_ppm);
+
+		pll_res->ssc_center = false;
+
+		label = of_get_property(pdev->dev.of_node,
+			"qcom,dsi-pll-ssc-mode", NULL);
+
+		if (label && !strcmp(label, "center-spread"))
+			pll_res->ssc_center = true;
+	}
+
+	if (mdss_pll_get_ioresources(pdev, &pll_res->pll_base, "pll_base")) {
+		pr_err("Unable to remap pll base resources\n");
+		return -ENOMEM;
+	}
+
+	pr_debug("%s: ndx=%d base=%p\n", __func__,
+			pll_res->index, pll_res->pll_base);
+
+	rc = mdss_pll_resource_parse(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll resource parsing from dt failed rc=%d\n", rc);
+		return rc;
+	}
+
+	if (mdss_pll_get_ioresources(pdev, &pll_res->phy_base, "phy_base")) {
+		pr_err("Unable to remap pll phy base resources\n");
+		return -ENOMEM;
+	}
+
+	if (mdss_pll_get_ioresources(pdev, &pll_res->dyn_pll_base,
+							"dynamic_pll_base")) {
+		pr_err("Unable to remap dynamic pll base resources\n");
+		return -ENOMEM;
+	}
+
+	if (mdss_pll_get_ioresources(pdev, &pll_res->ln_tx0_base,
+							"ln_tx0_base")) {
+		pr_err("Unable to remap Lane TX0 base resources\n");
+		return -ENOMEM;
+	}
+
+	if (mdss_pll_get_ioresources(pdev, &pll_res->ln_tx1_base,
+							"ln_tx1_base")) {
+		pr_err("Unable to remap Lane TX1 base resources\n");
+		return -ENOMEM;
+	}
+
+	if (mdss_pll_get_ioresources(pdev, &pll_res->gdsc_base, "gdsc_base")) {
+		pr_err("Unable to remap gdsc base resources\n");
+		return -ENOMEM;
+	}
+
+	rc = mdss_pll_resource_init(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll ndx=%d resource init failed rc=%d\n",
+				pll_res->index, rc);
+		return rc;
+	}
+
+	rc = mdss_pll_clock_register(pdev, pll_res);
+	if (rc) {
+		pr_err("Pll ndx=%d clock register failed rc=%d\n",
+			pll_res->index, rc);
+		goto clock_register_error;
+	}
+
+	return rc;
+
+clock_register_error:
+	mdss_pll_resource_deinit(pdev, pll_res);
+	return rc;
+}
+
+static int mdss_pll_remove(struct platform_device *pdev)
+{
+	struct mdss_pll_resources *pll_res;
+
+	pll_res = platform_get_drvdata(pdev);
+	if (!pll_res) {
+		pr_err("Invalid PLL resource data\n");
+		return 0;
+	}
+
+	mdss_pll_resource_deinit(pdev, pll_res);
+	mdss_pll_resource_release(pdev, pll_res);
+	return 0;
+}
+
+static const struct of_device_id mdss_pll_dt_match[] = {
+	{.compatible = "qcom,mdss_dsi_pll_10nm"},
+	{.compatible = "qcom,mdss_dp_pll_10nm"},
+	{.compatible = "qcom,mdss_dsi_pll_7nm"},
+	{.compatible = "qcom,mdss_dp_pll_7nm"},
+	{.compatible = "qcom,mdss_dsi_pll_28lpm"},
+	{.compatible = "qcom,mdss_dsi_pll_14nm"},
+	{.compatible = "qcom,mdss_dp_pll_14nm"},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mdss_pll_dt_match);
+
+static struct platform_driver mdss_pll_driver = {
+	.probe = mdss_pll_probe,
+	.remove = mdss_pll_remove,
+	.driver = {
+		.name = "mdss_pll",
+		.of_match_table = mdss_pll_dt_match,
+	},
+};
+
+static int __init mdss_pll_driver_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&mdss_pll_driver);
+	if (rc)
+		pr_err("mdss_register_pll_driver() failed!\n");
+
+	return rc;
+}
+fs_initcall(mdss_pll_driver_init);
+
+static void __exit mdss_pll_driver_deinit(void)
+{
+	platform_driver_unregister(&mdss_pll_driver);
+}
+module_exit(mdss_pll_driver_deinit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("mdss pll driver");
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
new file mode 100644
index 0000000..ace69c4
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -0,0 +1,241 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __MDSS_PLL_H
+#define __MDSS_PLL_H
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include "../clk-regmap.h"
+#include "../clk-regmap-divider.h"
+#include "../clk-regmap-mux.h"
+
+#if defined(CONFIG_DRM)
+#include <linux/sde_io_util.h>
+#else
+#include <linux/mdss_io_util.h>
+#endif
+
+#define MDSS_PLL_REG_W(base, offset, data)	\
+				writel_relaxed((data), (base) + (offset))
+#define MDSS_PLL_REG_R(base, offset)	readl_relaxed((base) + (offset))
+
+#define PLL_CALC_DATA(addr0, addr1, data0, data1)      \
+	(((data1) << 24) | ((((addr1) / 4) & 0xFF) << 16) | \
+	 ((data0) << 8) | (((addr0) / 4) & 0xFF))
+
+#define MDSS_DYN_PLL_REG_W(base, offset, addr0, addr1, data0, data1)   \
+		writel_relaxed(PLL_CALC_DATA(addr0, addr1, data0, data1), \
+			(base) + (offset))
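+/*
+ * Illustration (not part of the driver): PLL_CALC_DATA packs two
+ * (addr, data) pairs into one 32-bit word, with each address stored
+ * as a word offset (addr / 4) in 8 bits:
+ *
+ *	[31:24] data1 | [23:16] addr1/4 | [15:8] data0 | [7:0] addr0/4
+ *
+ * e.g. MDSS_DYN_PLL_REG_W(base, 0x0, 0x10, 0x14, 0xAA, 0xBB) writes
+ * 0xBB05AA04 to (base + 0x0).
+ */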
+
+enum {
+	MDSS_DSI_PLL_10NM,
+	MDSS_DP_PLL_10NM,
+	MDSS_DSI_PLL_7NM,
+	MDSS_DP_PLL_7NM,
+	MDSS_DSI_PLL_28LPM,
+	MDSS_DSI_PLL_14NM,
+	MDSS_DP_PLL_14NM,
+	MDSS_UNKNOWN_PLL,
+};
+
+enum {
+	MDSS_PLL_TARGET_8996,
+};
+
+#define DFPS_MAX_NUM_OF_FRAME_RATES 20
+
+struct dfps_panel_info {
+	uint32_t enabled;
+	uint32_t frame_rate_cnt;
+	uint32_t frame_rate[DFPS_MAX_NUM_OF_FRAME_RATES]; /* hz */
+};
+
+struct dfps_pll_codes {
+	uint32_t pll_codes_1;
+	uint32_t pll_codes_2;
+};
+
+struct dfps_codes_info {
+	uint32_t is_valid;
+	uint32_t frame_rate;	/* hz */
+	uint32_t clk_rate;	/* hz */
+	struct dfps_pll_codes pll_codes;
+};
+
+struct dfps_info {
+	struct dfps_panel_info panel_dfps;
+	struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
+	void *dfps_fb_base;
+};
+
+struct mdss_pll_resources {
+
+	/* PLL specific resources like GPIO, power supply, clocks, etc. */
+	struct dss_module_power mp;
+
+	/*
+	 * dsi/edp/hdmi pll base register, phy, gdsc and dynamic refresh
+	 * register mappings
+	 */
+	void __iomem	*pll_base;
+	void __iomem	*phy_base;
+	void __iomem	*ln_tx0_base;
+	void __iomem	*ln_tx1_base;
+	void __iomem	*gdsc_base;
+	void __iomem	*dyn_pll_base;
+
+	bool	is_init_locked;
+	s64	vco_current_rate;
+	s64	vco_locking_rate;
+	s64	vco_ref_clk_rate;
+
+	/*
+	 * Certain PLLs need to restore the same vco rate after resume in
+	 * the suspend/resume scenario. Cache the vco rate for such PLLs.
+	 */
+	unsigned long	vco_cached_rate;
+	u32		cached_cfg0;
+	u32		cached_cfg1;
+	u32		cached_outdiv;
+
+	u32		cached_postdiv1;
+	u32		cached_postdiv3;
+	u32		cached_vreg_cfg;
+
+	/* dsi/edp/hdmi pll interface type */
+	u32		pll_interface_type;
+
+	/*
+	 * Target ID. Used in pll_register API for valid target check before
+	 * registering the PLL clocks.
+	 */
+	u32		target_id;
+
+	/* HW recommended delay during configuration of vco clock rate */
+	u32		vco_delay;
+
+	/* Ref-count of the PLL resources */
+	u32		resource_ref_cnt;
+
+	/*
+	 * Keep track of the resource status to avoid updating the same
+	 * status for the pll from different paths
+	 */
+	bool		resource_enable;
+
+	/*
+	 * Certain PLLs do not allow a vco rate update while they are on.
+	 * Keep track of their status so they can be turned on/off after a
+	 * successful set rate.
+	 */
+	bool		pll_on;
+
+	/*
+	 * handoff_resources is true if the pll was already enabled by the
+	 * bootloader in the continuous splash case. The clock API calls the
+	 * handoff API to take over that state. It stays false if the
+	 * continuous splash feature is disabled.
+	 */
+	bool		handoff_resources;
+
+	/*
+	 * caching the pll trim codes in the case of dynamic refresh
+	 */
+	int		cache_pll_trim_codes[2];
+
+	/*
+	 * for maintaining the status of saving trim codes
+	 */
+	bool		reg_upd;
+
+	/*
+	 * Notifier callback for MDSS gdsc regulator events
+	 */
+	struct notifier_block gdsc_cb;
+
+	/*
+	 * Worker function to call PLL off event
+	 */
+	struct work_struct pll_off;
+
+	/*
+	 * PLL index when multiple instances are available, e.g. DSI
+	 * has 2 PLLs.
+	 */
+	uint32_t index;
+
+	bool ssc_en;	/* spread spectrum clocking enabled */
+	bool ssc_center;	/* default is down spread */
+	u32 ssc_freq;
+	u32 ssc_ppm;
+
+	struct mdss_pll_resources *slave;
+
+	/*
+	 * target pll revision information
+	 */
+	int		revision;
+
+	void *priv;
+
+	/*
+	 * dynamic refresh pll codes stored in this structure
+	 */
+	struct dfps_info *dfps;
+
+};
+
+struct mdss_pll_vco_calc {
+	s32 div_frac_start1;
+	s32 div_frac_start2;
+	s32 div_frac_start3;
+	s64 dec_start1;
+	s64 dec_start2;
+	s64 pll_plllock_cmp1;
+	s64 pll_plllock_cmp2;
+	s64 pll_plllock_cmp3;
+};
+
+static inline bool is_gdsc_disabled(struct mdss_pll_resources *pll_res)
+{
+	if (!pll_res->gdsc_base) {
+		WARN(1, "gdsc_base register is not defined\n");
+		return true;
+	}
+	return readl_relaxed(pll_res->gdsc_base) & BIT(31) ? false : true;
+}
+
+static inline int mdss_pll_div_prepare(struct clk_hw *hw)
+{
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+	/* Restore the divider's value */
+	return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
+				clk_hw_get_rate(parent_hw));
+}
+
+static inline int mdss_set_mux_sel(void *context, unsigned int reg,
+					unsigned int val)
+{
+	return 0;
+}
+
+static inline int mdss_get_mux_sel(void *context, unsigned int reg,
+					unsigned int *val)
+{
+	*val = 0;
+	return 0;
+}
+
+int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable);
+int mdss_pll_util_resource_enable(struct mdss_pll_resources *pll_res,
+								bool enable);
+int mdss_pll_util_resource_parse(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+struct dss_vreg *mdss_pll_get_mp_by_reg_name(struct mdss_pll_resources *pll_res,
+		char *name);
+#endif
diff --git a/drivers/clk/qcom/mdss/mdss_pll_trace.h b/drivers/clk/qcom/mdss/mdss_pll_trace.h
new file mode 100644
index 0000000..0de9def
--- /dev/null
+++ b/drivers/clk/qcom/mdss/mdss_pll_trace.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_MDSS_PLL_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MDSS_PLL_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mdss_pll
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mdss_pll_trace
+
+TRACE_EVENT(mdss_pll_lock_start,
+	TP_PROTO(
+			u64 vco_cached_rate,
+			s64 vco_current_rate,
+			u32 cached_cfg0,
+			u32 cached_cfg1,
+			u32 cached_outdiv,
+			u32 resource_ref_cnt),
+	TP_ARGS(
+			vco_cached_rate,
+			vco_current_rate,
+			cached_cfg0,
+			cached_cfg1,
+			cached_outdiv,
+			resource_ref_cnt),
+	TP_STRUCT__entry(
+			__field(u64, vco_cached_rate)
+			__field(s64, vco_current_rate)
+			__field(u32, cached_cfg0)
+			__field(u32, cached_cfg1)
+			__field(u32, cached_outdiv)
+			__field(u32, resource_ref_cnt)
+
+	),
+	TP_fast_assign(
+			__entry->vco_cached_rate = vco_cached_rate;
+			__entry->vco_current_rate = vco_current_rate;
+			__entry->cached_cfg0 = cached_cfg0;
+			__entry->cached_cfg1 = cached_cfg1;
+			__entry->cached_outdiv = cached_outdiv;
+			__entry->resource_ref_cnt = resource_ref_cnt;
+	),
+	TP_printk(
+		"vco_cached_rate=%llu vco_current_rate=%lld cached_cfg0=%d cached_cfg1=%d cached_outdiv=%d resource_ref_cnt=%d",
+			__entry->vco_cached_rate,
+			__entry->vco_current_rate,
+			__entry->cached_cfg0,
+			__entry->cached_cfg1,
+			__entry->cached_outdiv,
+			__entry->resource_ref_cnt)
+);
+
+TRACE_EVENT(pll_tracing_mark_write,
+	TP_PROTO(int pid, const char *name, bool trace_begin),
+	TP_ARGS(pid, name, trace_begin),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(trace_name, name)
+			__field(bool, trace_begin)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(trace_name, name);
+			__entry->trace_begin = trace_begin;
+	),
+	TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+		__entry->pid, __get_str(trace_name))
+);
+
+TRACE_EVENT(mdss_pll_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = pid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+);
+
+#define MDSS_PLL_ATRACE_END(name) trace_pll_tracing_mark_write(current->tgid,\
+		name, 0)
+#define MDSS_PLL_ATRACE_BEGIN(name) trace_pll_tracing_mark_write(current->tgid,\
+		name, 1)
+#define MDSS_PLL_ATRACE_FUNC() MDSS_PLL_ATRACE_BEGIN(__func__)
+#define MDSS_PLL_ATRACE_INT(name, value) \
+	trace_mdss_pll_trace_counter(current->tgid, name, value)
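+/*
+ * Usage sketch (illustrative; wait_for_pll_lock() is hypothetical):
+ *
+ *	MDSS_PLL_ATRACE_BEGIN("pll_lock");
+ *	rc = wait_for_pll_lock(pll);
+ *	MDSS_PLL_ATRACE_END("pll_lock");
+ *
+ * which emits matching B/E slices named "pll_lock" for systrace.
+ */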
+
+#endif /* _MDSS_PLL_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0cd8eb7..93a9d72 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -298,3 +298,14 @@
 	  This add the CPUFreq driver support for Intel PXA2xx SOCs.
 
 	  If in doubt, say N.
+
+config ARM_QCOM_CPUFREQ_HW
+	bool "QCOM CPUFreq HW driver"
+	depends on ARCH_QCOM
+	help
+	  Support for the CPUFreq HW driver.
+	  Some QCOM chipsets have a HW engine to offload the steps
+	  necessary for changing the frequency of the CPUs. Firmware loaded
+	  in this engine exposes a programming interface to the OS.
+	  The driver implements the cpufreq interface for this HW engine.
+	  Say Y if you want to support CPUFreq HW.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 648beca..4187d89 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -88,6 +88,7 @@
 obj-$(CONFIG_ARM_TEGRA186_CPUFREQ)	+= tegra186-cpufreq.o
 obj-$(CONFIG_ARM_TI_CPUFREQ)		+= ti-cpufreq.o
 obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ)	+= vexpress-spc-cpufreq.o
+obj-$(CONFIG_ARM_QCOM_CPUFREQ_HW)	+= qcom-cpufreq-hw.o
 
 
 ##################################################################################
diff --git a/drivers/cpufreq/qcom-cpufreq-hw.c b/drivers/cpufreq/qcom-cpufreq-hw.c
new file mode 100644
index 0000000..0f9a3a4
--- /dev/null
+++ b/drivers/cpufreq/qcom-cpufreq-hw.c
@@ -0,0 +1,356 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#define LUT_MAX_ENTRIES			40U
+#define CORE_COUNT_VAL(val)		(((val) & (GENMASK(18, 16))) >> 16)
+#define LUT_ROW_SIZE			4
+#define CLK_HW_DIV			2
+
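+/*
+ * Illustration: a LUT word of 0x00040050 decodes via the masks above
+ * to core_count = 4 (bits 18:16), lval = 0x50 (bits 7:0) and
+ * src = 0 (bits 31:30).
+ */
+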
+enum {
+	REG_ENABLE,
+	REG_LUT_TABLE,
+	REG_PERF_STATE,
+
+	REG_ARRAY_SIZE,
+};
+
+struct cpufreq_qcom {
+	struct cpufreq_frequency_table *table;
+	void __iomem *reg_bases[REG_ARRAY_SIZE];
+	cpumask_t related_cpus;
+	unsigned int max_cores;
+	unsigned long xo_rate;
+	unsigned long cpu_hw_rate;
+};
+
+static const u16 cpufreq_qcom_std_offsets[REG_ARRAY_SIZE] = {
+	[REG_ENABLE]		= 0x0,
+	[REG_LUT_TABLE]		= 0x100,
+	[REG_PERF_STATE]	= 0x320,
+};
+
+static struct cpufreq_qcom *qcom_freq_domain_map[NR_CPUS];
+
+static int
+qcom_cpufreq_hw_target_index(struct cpufreq_policy *policy,
+			     unsigned int index)
+{
+	struct cpufreq_qcom *c = policy->driver_data;
+
+	writel_relaxed(index, c->reg_bases[REG_PERF_STATE]);
+
+	return 0;
+}
+
+static unsigned int qcom_cpufreq_hw_get(unsigned int cpu)
+{
+	struct cpufreq_qcom *c;
+	struct cpufreq_policy *policy;
+	unsigned int index;
+
+	policy = cpufreq_cpu_get_raw(cpu);
+	if (!policy)
+		return 0;
+
+	c = policy->driver_data;
+
+	index = readl_relaxed(c->reg_bases[REG_PERF_STATE]);
+	index = min(index, LUT_MAX_ENTRIES - 1);
+
+	return policy->freq_table[index].frequency;
+}
+
+static unsigned int
+qcom_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
+			    unsigned int target_freq)
+{
+	struct cpufreq_qcom *c = policy->driver_data;
+	int index;
+
+	index = policy->cached_resolved_idx;
+	if (index < 0)
+		return 0;
+
+	writel_relaxed(index, c->reg_bases[REG_PERF_STATE]);
+
+	return policy->freq_table[index].frequency;
+}
+
+static int qcom_cpufreq_hw_cpu_init(struct cpufreq_policy *policy)
+{
+	struct cpufreq_qcom *c;
+
+	c = qcom_freq_domain_map[policy->cpu];
+	if (!c) {
+		pr_err("No scaling support for CPU%d\n", policy->cpu);
+		return -ENODEV;
+	}
+
+	cpumask_copy(policy->cpus, &c->related_cpus);
+
+	policy->fast_switch_possible = true;
+	policy->freq_table = c->table;
+	policy->driver_data = c;
+
+	return 0;
+}
+
+static struct freq_attr *qcom_cpufreq_hw_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	&cpufreq_freq_attr_scaling_boost_freqs,
+	NULL
+};
+
+static struct cpufreq_driver cpufreq_qcom_hw_driver = {
+	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK |
+			  CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
+	.verify		= cpufreq_generic_frequency_table_verify,
+	.target_index	= qcom_cpufreq_hw_target_index,
+	.get		= qcom_cpufreq_hw_get,
+	.init		= qcom_cpufreq_hw_cpu_init,
+	.fast_switch    = qcom_cpufreq_hw_fast_switch,
+	.name		= "qcom-cpufreq-hw",
+	.attr		= qcom_cpufreq_hw_attr,
+	.boost_enabled	= true,
+};
+
+static int qcom_cpufreq_hw_read_lut(struct platform_device *pdev,
+				    struct cpufreq_qcom *c)
+{
+	struct device *dev = &pdev->dev;
+	void __iomem *base;
+	u32 data, src, lval, i, core_count, prev_cc = 0, prev_freq = 0, cur_freq;
+
+	c->table = devm_kcalloc(dev, LUT_MAX_ENTRIES + 1,
+				sizeof(*c->table), GFP_KERNEL);
+	if (!c->table)
+		return -ENOMEM;
+
+	base = c->reg_bases[REG_LUT_TABLE];
+
+	for (i = 0; i < LUT_MAX_ENTRIES; i++) {
+		data = readl_relaxed(base + i * LUT_ROW_SIZE);
+		src = (data & GENMASK(31, 30)) >> 30;
+		lval = data & GENMASK(7, 0);
+		core_count = CORE_COUNT_VAL(data);
+
+		if (src)
+			c->table[i].frequency = c->xo_rate * lval / 1000;
+		else
+			c->table[i].frequency = c->cpu_hw_rate / 1000;
+
+		cur_freq = c->table[i].frequency;
+
+		dev_dbg(dev, "index=%d freq=%d, core_count %d\n",
+			i, c->table[i].frequency, core_count);
+
+		if (core_count != c->max_cores)
+			cur_freq = CPUFREQ_ENTRY_INVALID;
+
+		/*
+		 * Two of the same frequencies with the same core counts means
+		 * end of table.
+		 */
+		if (i > 0 && c->table[i - 1].frequency ==
+		   c->table[i].frequency && prev_cc == core_count) {
+			struct cpufreq_frequency_table *prev = &c->table[i - 1];
+
+			if (prev_freq == CPUFREQ_ENTRY_INVALID)
+				prev->flags = CPUFREQ_BOOST_FREQ;
+			break;
+		}
+		prev_cc = core_count;
+		prev_freq = cur_freq;
+	}
+
+	c->table[i].frequency = CPUFREQ_TABLE_END;
+
+	return 0;
+}
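+/*
+ * Worked example (illustrative): with xo_rate = 19200000 Hz and a LUT
+ * entry where src != 0 and lval = 80, the table frequency is
+ * 19200000 * 80 / 1000 = 1536000 kHz, i.e. 1.536 GHz.
+ */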
+
+static int qcom_get_related_cpus(int index, struct cpumask *m)
+{
+	struct device_node *cpu_np;
+	struct of_phandle_args args;
+	int cpu, ret;
+
+	for_each_possible_cpu(cpu) {
+		cpu_np = of_cpu_device_node_get(cpu);
+		if (!cpu_np)
+			continue;
+
+		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
+				"#freq-domain-cells", 0, &args);
+		of_node_put(cpu_np);
+		if (ret < 0)
+			continue;
+
+		if (index == args.args[0])
+			cpumask_set_cpu(cpu, m);
+	}
+
+	return 0;
+}
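+/*
+ * The lookup above assumes CPU nodes carry a two-cell phandle of the
+ * form (illustrative DT snippet; node names are hypothetical):
+ *
+ *	cpu@0 {
+ *		qcom,freq-domain = <&cpufreq_hw 0 4>;
+ *	};
+ *
+ * where, per qcom_cpu_resources_init() below, the first cell selects
+ * the frequency domain index and the second gives its core count.
+ */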
+
+static int qcom_cpu_resources_init(struct platform_device *pdev,
+				   unsigned int cpu, int index,
+				   unsigned int max_cores,
+				   unsigned long xo_rate,
+				   unsigned long cpu_hw_rate)
+{
+	struct cpufreq_qcom *c;
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	const u16 *offsets;
+	int ret, i, cpu_r;
+	void __iomem *base;
+
+	if (qcom_freq_domain_map[cpu])
+		return 0;
+
+	c = devm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return -ENOMEM;
+
+	offsets = of_device_get_match_data(&pdev->dev);
+	if (!offsets)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
+	base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	for (i = REG_ENABLE; i < REG_ARRAY_SIZE; i++)
+		c->reg_bases[i] = base + offsets[i];
+
+	/* HW should be in enabled state to proceed */
+	if (!(readl_relaxed(c->reg_bases[REG_ENABLE]) & 0x1)) {
+		dev_err(dev, "Domain-%d cpufreq hardware not enabled\n", index);
+		return -ENODEV;
+	}
+
+	ret = qcom_get_related_cpus(index, &c->related_cpus);
+	if (ret) {
+		dev_err(dev, "Domain-%d failed to get related CPUs\n", index);
+		return ret;
+	}
+
+	c->max_cores = max_cores;
+	if (!c->max_cores)
+		return -ENOENT;
+
+	c->xo_rate = xo_rate;
+	c->cpu_hw_rate = cpu_hw_rate;
+
+	ret = qcom_cpufreq_hw_read_lut(pdev, c);
+	if (ret) {
+		dev_err(dev, "Domain-%d failed to read LUT\n", index);
+		return ret;
+	}
+
+	for_each_cpu(cpu_r, &c->related_cpus)
+		qcom_freq_domain_map[cpu_r] = c;
+
+	return 0;
+}
+
+static int qcom_resources_init(struct platform_device *pdev)
+{
+	struct device_node *cpu_np;
+	struct of_phandle_args args;
+	struct clk *clk;
+	unsigned int cpu;
+	unsigned long xo_rate, cpu_hw_rate;
+	int ret;
+
+	clk = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	xo_rate = clk_get_rate(clk);
+
+	devm_clk_put(&pdev->dev, clk);
+
+	clk = devm_clk_get(&pdev->dev, "cpu_clk");
+	if (IS_ERR(clk))
+		return PTR_ERR(clk);
+
+	cpu_hw_rate = clk_get_rate(clk) / CLK_HW_DIV;
+
+	devm_clk_put(&pdev->dev, clk);
+
+	for_each_possible_cpu(cpu) {
+		cpu_np = of_cpu_device_node_get(cpu);
+		if (!cpu_np) {
+			dev_dbg(&pdev->dev, "Failed to get cpu %d device\n",
+				cpu);
+			continue;
+		}
+
+		ret = of_parse_phandle_with_args(cpu_np, "qcom,freq-domain",
+				"#freq-domain-cells", 0, &args);
+		of_node_put(cpu_np);
+		if (ret < 0)
+			return ret;
+
+		ret = qcom_cpu_resources_init(pdev, cpu, args.args[0],
+					      args.args[1], xo_rate,
+					      cpu_hw_rate);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	/* Get the bases of cpufreq for domains */
+	rc = qcom_resources_init(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "CPUFreq resource init failed\n");
+		return rc;
+	}
+
+	rc = cpufreq_register_driver(&cpufreq_qcom_hw_driver);
+	if (rc) {
+		dev_err(&pdev->dev, "CPUFreq HW driver failed to register\n");
+		return rc;
+	}
+
+	dev_dbg(&pdev->dev, "QCOM CPUFreq HW driver initialized\n");
+
+	return 0;
+}
+
+static const struct of_device_id qcom_cpufreq_hw_match[] = {
+	{ .compatible = "qcom,cpufreq-hw", .data = &cpufreq_qcom_std_offsets },
+	{}
+};
+
+static struct platform_driver qcom_cpufreq_hw_driver = {
+	.probe = qcom_cpufreq_hw_driver_probe,
+	.driver = {
+		.name = "qcom-cpufreq-hw",
+		.of_match_table = qcom_cpufreq_hw_match,
+	},
+};
+
+static int __init qcom_cpufreq_hw_init(void)
+{
+	return platform_driver_register(&qcom_cpufreq_hw_driver);
+}
+subsys_initcall(qcom_cpufreq_hw_init);
+
+MODULE_DESCRIPTION("QCOM firmware-based CPU Frequency driver");
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index a6771ce..829109f 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -87,7 +87,6 @@
 obj-$(CONFIG_DRM_QXL) += qxl/
 obj-$(CONFIG_DRM_BOCHS) += bochs/
 obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
-obj-$(CONFIG_DRM_MSM) += msm/
 obj-$(CONFIG_DRM_TEGRA) += tegra/
 obj-$(CONFIG_DRM_STM) += stm/
 obj-$(CONFIG_DRM_STI) += sti/
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ff0bfc6..ffd2621 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -98,6 +98,14 @@
 #define LEVEL_GTF2	2
 #define LEVEL_CVT	3
 
+/* Enum storing luminance types for HDR blocks in EDID */
+enum luminance_value {
+	NO_LUMINANCE_DATA = 3,
+	MAXIMUM_LUMINANCE = 4,
+	FRAME_AVERAGE_LUMINANCE = 5,
+	MINIMUM_LUMINANCE = 6
+};
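+
+/*
+ * The enum values double as byte offsets into the HDR static metadata
+ * data block, so a given value is only present when the block is long
+ * enough to contain that offset (see
+ * drm_edid_is_luminance_value_present() below).
+ */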
+
 static const struct edid_quirk {
 	char vendor[4];
 	int product_id;
@@ -216,7 +224,8 @@
 	/* 0x05 - 640x480@72Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
 		   704, 832, 0, 480, 489, 492, 520, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   .vrefresh = 72, },
 	/* 0x06 - 640x480@75Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
 		   720, 840, 0, 480, 481, 484, 500, 0,
@@ -573,7 +582,8 @@
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
 		   704,  832, 0, 480, 489, 492, 520, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
+		   .vrefresh = 72, }, /* 640x480@72Hz */
 	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
 		   768,  864, 0, 480, 483, 486, 525, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
@@ -2826,11 +2836,12 @@
 
 	return closure.modes;
 }
-
+#define VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK 0x0
 #define AUDIO_BLOCK	0x01
 #define VIDEO_BLOCK     0x02
 #define VENDOR_BLOCK    0x03
 #define SPEAKER_BLOCK	0x04
+#define HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK 0x06
 #define USE_EXTENDED_TAG 0x07
 #define EXT_VIDEO_CAPABILITY_BLOCK 0x00
 #define EXT_VIDEO_DATA_BLOCK_420	0x0E
@@ -3830,6 +3841,156 @@
 		      connector->audio_latency[1]);
 }
 
+/*
+ * drm_extract_vcdb_info - Parse the HDMI Video Capability Data Block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA Video Capability Data Block
+ *
+ * Parses the HDMI VCDB to extract sink info for @connector.
+ */
+static void
+drm_extract_vcdb_info(struct drm_connector *connector, const u8 *db)
+{
+	/*
+	 * Check the sink's underscan support, each
+	 * reported as a 2-bit field:
+	 * Bits 5:4 - preferred video format
+	 * Bits 3:2 - IT video format
+	 * Bits 1:0 - CE video format
+	 */
+
+	connector->pt_scan_info =
+		(db[2] & (BIT(4) | BIT(5))) >> 4;
+	connector->it_scan_info =
+		(db[2] & (BIT(3) | BIT(2))) >> 2;
+	connector->ce_scan_info =
+		db[2] & (BIT(1) | BIT(0));
+
+	DRM_DEBUG_KMS("Scan Info (pt|it|ce): (%d|%d|%d)",
+			  (int) connector->pt_scan_info,
+			  (int) connector->it_scan_info,
+			  (int) connector->ce_scan_info);
+}
+
+static bool drm_edid_is_luminance_value_present(u32 block_length,
+		enum luminance_value value)
+{
+	return block_length > NO_LUMINANCE_DATA && value <= block_length;
+}
+
+/*
+ * drm_extract_hdr_db - Parse the HDMI HDR extended block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the HDMI HDR extended block
+ *
+ * Parses the HDMI HDR extended block to extract sink info for @connector.
+ */
+static void
+drm_extract_hdr_db(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = 0;
+
+	if (!db)
+		return;
+
+	len = db[0] & 0x1f;
+	/* Byte 3: Electro-Optical Transfer Functions */
+	connector->hdr_eotf = db[2] & 0x3F;
+
+	/* Byte 4: Static Metadata Descriptor Type 1 */
+	connector->hdr_metadata_type_one = (db[3] & BIT(0));
+
+	/* Byte 5: Desired Content Maximum Luminance */
+	if (drm_edid_is_luminance_value_present(len, MAXIMUM_LUMINANCE))
+		connector->hdr_max_luminance =
+			db[MAXIMUM_LUMINANCE];
+
+	/* Byte 6: Desired Content Max Frame-average Luminance */
+	if (drm_edid_is_luminance_value_present(len, FRAME_AVERAGE_LUMINANCE))
+		connector->hdr_avg_luminance =
+			db[FRAME_AVERAGE_LUMINANCE];
+
+	/* Byte 7: Desired Content Min Luminance */
+	if (drm_edid_is_luminance_value_present(len, MINIMUM_LUMINANCE))
+		connector->hdr_min_luminance =
+			db[MINIMUM_LUMINANCE];
+
+	connector->hdr_supported = true;
+
+	DRM_DEBUG_KMS("HDR electro-optical %d\n", connector->hdr_eotf);
+	DRM_DEBUG_KMS("metadata desc 1 %d\n", connector->hdr_metadata_type_one);
+	DRM_DEBUG_KMS("max luminance %d\n", connector->hdr_max_luminance);
+	DRM_DEBUG_KMS("avg luminance %d\n", connector->hdr_avg_luminance);
+	DRM_DEBUG_KMS("min luminance %d\n", connector->hdr_min_luminance);
+}
+
+/*
+ * drm_hdmi_extract_extended_blk_info - Parse the HDMI extended tag blocks
+ * @connector: connector corresponding to the HDMI sink
+ * @edid: handle to the EDID structure
+ *
+ * Parses all extended tag blocks to extract sink info for @connector.
+ */
+static void
+drm_hdmi_extract_extended_blk_info(struct drm_connector *connector,
+		const struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == USE_EXTENDED_TAG) {
+				DRM_DEBUG_KMS("found extended tag block = %d\n",
+						db[1]);
+				switch (db[1]) {
+				case VIDEO_CAPABILITY_EXTENDED_DATA_BLOCK:
+					drm_extract_vcdb_info(connector, db);
+					break;
+				case HDR_STATIC_METADATA_EXTENDED_DATA_BLOCK:
+					drm_extract_hdr_db(connector, db);
+					break;
+				default:
+					break;
+				}
+			}
+		}
+	}
+}
+
+static void
+parse_hdmi_hf_vsdb(struct drm_connector *connector, const u8 *db)
+{
+	u8 len = cea_db_payload_len(db);
+
+	if (len < 7)
+		return;
+
+	if (db[4] != 1)
+		return; /* invalid version */
+
+	connector->max_tmds_char = db[5] * 5;
+	connector->scdc_present = db[6] & (1 << 7);
+	connector->rr_capable = db[6] & (1 << 6);
+	connector->flags_3d = db[6] & 0x7;
+	connector->supports_scramble = connector->scdc_present &&
+			(db[6] & (1 << 3));
+
+	DRM_DEBUG_KMS(
+		"HDMI v2: max TMDS char %d, scdc %s, rr %s,3D flags 0x%x, scramble %s\n",
+		connector->max_tmds_char,
+		connector->scdc_present ? "available" : "not available",
+		connector->rr_capable ? "capable" : "not capable",
+		connector->flags_3d,
+		connector->supports_scramble ? "supported" : "not supported");
+}
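+/*
+ * Illustration (not normative): for an HF-VSDB payload with
+ * db[5] = 120 and db[6] = 0xC8 (binary 1100 1000), the parsing above
+ * yields max_tmds_char = 600, scdc_present = true, rr_capable = true,
+ * flags_3d = 0 and supports_scramble = true.
+ */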
+
 static void
 monitor_name(struct detailed_timing *t, void *data)
 {
@@ -3962,6 +4123,9 @@
 				/* HDMI Vendor-Specific Data Block */
 				if (cea_db_is_hdmi_vsdb(db))
 					drm_parse_hdmi_vsdb_audio(connector, db);
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_forum_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
 				break;
 			default:
 				break;
@@ -4474,6 +4638,38 @@
 	info->non_desktop = 0;
 }
 
+static void
+drm_hdmi_extract_vsdbs_info(struct drm_connector *connector,
+		const struct edid *edid)
+{
+	const u8 *cea = drm_find_cea_extension(edid);
+	const u8 *db = NULL;
+
+	if (cea && cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (cea_db_offsets(cea, &start, &end))
+			return;
+
+		for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+
+			if (cea_db_tag(db) == VENDOR_BLOCK) {
+				/* HDMI Vendor-Specific Data Block */
+				if (cea_db_is_hdmi_vsdb(db)) {
+					drm_parse_hdmi_vsdb_video(
+						connector, db);
+					drm_parse_hdmi_vsdb_audio(
+						connector, db);
+				}
+				/* HDMI Forum Vendor-Specific Data Block */
+				else if (cea_db_is_hdmi_forum_vsdb(db))
+					parse_hdmi_hf_vsdb(connector, db);
+			}
+		}
+	}
+}
+
 u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
 {
 	struct drm_display_info *info = &connector->display_info;
@@ -4511,6 +4707,11 @@
 			  connector->name, info->bpc);
 	}
 
+	/* Extract audio and video latency fields for the sink */
+	drm_hdmi_extract_vsdbs_info(connector, edid);
+	/* Extract info from extended tag blocks */
+	drm_hdmi_extract_extended_blk_info(connector, edid);
+
 	/* Only defined for 1.4 with digital displays */
 	if (edid->revision < 4)
 		return quirks;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index ffa8dc3..d4147cd 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -46,6 +46,8 @@
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
+#define MAX_DRM_OPEN_COUNT		20
+
 /**
  * DOC: file operations
  *
@@ -310,6 +312,11 @@
 	if (!dev->open_count++)
 		need_setup = 1;
 
+	if (dev->open_count >= MAX_DRM_OPEN_COUNT) {
+		retcode = -EPERM;
+		goto err_undo;
+	}
+
 	/* share address_space across all char-devs of a single device */
 	filp->f_mapping = dev->anon_inode->i_mapping;
 
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 781af1d..77b1800 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -278,7 +278,8 @@
 	struct drm_framebuffer *fb;
 	int ret;
 
-	if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS)) {
+	if (r->flags & ~(DRM_MODE_FB_INTERLACED | DRM_MODE_FB_MODIFIERS |
+			DRM_MODE_FB_SECURE)) {
 		DRM_DEBUG_KMS("bad framebuffer flags 0x%08x\n", r->flags);
 		return ERR_PTR(-EINVAL);
 	}
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 80b7550..05d7db2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -456,7 +456,7 @@
 		return -EINVAL;
 
 	memset(packet, 0, sizeof(*packet));
-	packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
+	packet->header[2] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
 
 	/* TODO: compute ECC if hardware support is not available */
 
@@ -468,16 +468,16 @@
 	 * and 2.
 	 */
 	if (mipi_dsi_packet_format_is_long(msg->type)) {
-		packet->header[1] = (msg->tx_len >> 0) & 0xff;
-		packet->header[2] = (msg->tx_len >> 8) & 0xff;
+		packet->header[0] = (msg->tx_len >> 0) & 0xff;
+		packet->header[1] = (msg->tx_len >> 8) & 0xff;
 
 		packet->payload_length = msg->tx_len;
 		packet->payload = msg->tx_buf;
 	} else {
 		const u8 *tx = msg->tx_buf;
 
-		packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
-		packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
+		packet->header[0] = (msg->tx_len > 0) ? tx[0] : 0;
+		packet->header[1] = (msg->tx_len > 1) ? tx[1] : 0;
 	}
 
 	packet->size = sizeof(packet->header) + packet->payload_length;
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index cdb10f8..450a51b 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -26,6 +26,9 @@
 
 #include "drm_crtc_internal.h"
 
+#define MAX_BLOB_PROP_SIZE	(PAGE_SIZE * 30)
+#define MAX_BLOB_PROP_COUNT	250
+
 /**
  * DOC: overview
  *
@@ -556,7 +559,8 @@
 	struct drm_property_blob *blob;
 	int ret;
 
-	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
+	if (!length || length > MAX_BLOB_PROP_SIZE -
+				sizeof(struct drm_property_blob))
 		return ERR_PTR(-EINVAL);
 
 	blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
@@ -782,12 +786,19 @@
 			      void *data, struct drm_file *file_priv)
 {
 	struct drm_mode_create_blob *out_resp = data;
-	struct drm_property_blob *blob;
+	struct drm_property_blob *blob, *bt;
 	int ret = 0;
+	u32 count = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		return -EINVAL;
 
+	list_for_each_entry(bt, &file_priv->blobs, head_file)
+		count++;
+
+	if (count >= MAX_BLOB_PROP_COUNT)
+		return -EINVAL;
+
 	blob = drm_property_create_blob(dev, out_resp->length, NULL);
 	if (IS_ERR(blob))
 		return PTR_ERR(blob);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 736890f..ea44cb9 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -122,6 +122,16 @@
 	  Choose this option to enable DSI PLL driver which provides DSI
 	  source clocks under common clock framework.
 
+config DRM_SDE_WB
+	bool "Enable Writeback support in SDE DRM"
+	depends on DRM_MSM && DRM_MSM_SDE
+	default y
+	help
+	  Choose this option for writeback connector support.
+	  This option enables a virtual writeback connector where
+	  the output image is written back to memory in the format
+	  selected by the connector's mode and property settings.
+
 config DRM_MSM_DSI_28NM_PHY
 	bool "Enable DSI 28nm PHY driver in MSM DRM"
 	depends on DRM_MSM_DSI
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 911a1bf..26acd08 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
-ccflags-$(CONFIG_DRM_MSM_DP):= -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging -Idrivers/gpu/drm/msm/dp
 ccflags-y += -Idrivers/gpu/drm/msm/display-manager
 ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
 ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
-ccflags-$(CONFIG_DRM_MSM_SDE) += -Idrivers/gpu/drm/msm/sde
+ccflags-y += -Idrivers/gpu/drm/msm/sde
 ccflags-y += -Idrivers/media/platform/msm/sde/rotator
 ccflags-y += -Idrivers/gpu/drm/msm/hdmi
 
@@ -12,12 +12,15 @@
 	dp/dp_power.o \
 	dp/dp_catalog.o \
 	dp/dp_catalog_v420.o \
+	dp/dp_catalog_v200.o \
 	dp/dp_aux.o \
 	dp/dp_panel.o \
 	dp/dp_link.o \
 	dp/dp_ctrl.o \
 	dp/dp_audio.o \
 	dp/dp_debug.o \
+	dp/dp_hpd.o \
+	dp/dp_gpio_hpd.o \
 	dp/dp_display.o \
 	dp/dp_drm.o \
 	dp/dp_hdcp2p2.o \
@@ -201,7 +204,9 @@
 	msm_gem_vma.o \
 	msm_gpu.o \
 	msm_iommu.o \
+	msm_smmu.o \
 	msm_perf.o \
+	msm_prop.o \
 	msm_rd.o \
 	msm_ringbuffer.o \
 	msm_submitqueue.o
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
index 3754692..d76b9da 100644
--- a/drivers/gpu/drm/msm/dp/dp_audio.c
+++ b/drivers/gpu/drm/msm/dp/dp_audio.c
@@ -607,10 +607,11 @@
 		rc = -ENODEV;
 		goto end;
 	}
-
+#if defined(CONFIG_MSM_EXT_DISPLAY)
 	rc = msm_ext_disp_register_intf(audio->ext_pdev, ext);
 	if (rc)
 		pr_err("failed to register disp\n");
+#endif
 end:
 	if (pd)
 		of_node_put(pd);
@@ -647,9 +648,11 @@
 		goto end;
 	}
 
+#if defined(CONFIG_MSM_EXT_DISPLAY)
 	rc = msm_ext_disp_deregister_intf(audio->ext_pdev, ext);
 	if (rc)
 		pr_err("failed to deregister disp\n");
+#endif
 
 end:
 	return rc;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 593680e..2ec8d08 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -849,7 +849,7 @@
 
 	/* wait for idle state */
 	cancel_delayed_work(&dp->connect_work);
-	cancel_work(&dp->attention_work);
+	cancel_work_sync(&dp->attention_work);
 	flush_workqueue(dp->wq);
 
 	if (!dp->debug->sim_mode && !dp->parser->no_aux_switch)
@@ -990,7 +990,7 @@
 
 		/* wait for idle state */
 		cancel_delayed_work(&dp->connect_work);
-		cancel_work(&dp->attention_work);
+		cancel_work_sync(&dp->attention_work);
 		flush_workqueue(dp->wq);
 
 		dp_display_handle_disconnect(dp);
diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
index cca0034..34a9f96 100644
--- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
@@ -1267,7 +1267,7 @@
 		connector->funcs->reset(connector);
 
 	for (i = 1; i < MAX_DP_MST_DRM_BRIDGES; i++) {
-		drm_mode_connector_attach_encoder(connector,
+		drm_connector_attach_encoder(connector,
 				dp_mst->mst_bridge[i].encoder);
 	}
 
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0635c3..376f92e 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -15,11 +16,148 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/msm_drm_notify.h>
+#include <linux/notifier.h>
+
 #include "msm_drv.h"
 #include "msm_gem.h"
 #include "msm_kms.h"
+#include "msm_fence.h"
+#include "sde_trace.h"
 
-static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
+#define MULTIPLE_CONN_DETECTED(x) ((x) > 1)
+
+struct msm_commit {
+	struct drm_device *dev;
+	struct drm_atomic_state *state;
+	uint32_t crtc_mask;
+	bool nonblock;
+	struct kthread_work commit_work;
+};
+
+static BLOCKING_NOTIFIER_HEAD(msm_drm_notifier_list);
+
+/**
+ * msm_drm_register_client - register a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function registers a notifier callback function
+ * to msm_drm_notifier_list, which is called when an
+ * unblank/power down event is received.
+ */
+int msm_drm_register_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_register(&msm_drm_notifier_list,
+						nb);
+}
+EXPORT_SYMBOL(msm_drm_register_client);
+
+/**
+ * msm_drm_unregister_client - unregister a client notifier
+ * @nb: notifier block to callback on events
+ *
+ * This function unregisters the callback function from
+ * msm_drm_notifier_list.
+ */
+int msm_drm_unregister_client(struct notifier_block *nb)
+{
+	return blocking_notifier_chain_unregister(&msm_drm_notifier_list,
+						  nb);
+}
+EXPORT_SYMBOL(msm_drm_unregister_client);
+
+/**
+ * msm_drm_notifier_call_chain - notify clients of drm_events
+ * @val: event MSM_DRM_EARLY_EVENT_BLANK or MSM_DRM_EVENT_BLANK
+ * @v: notifier data, including the display id and the display blank
+ *     event (unblank or power down).
+ */
+static int msm_drm_notifier_call_chain(unsigned long val, void *v)
+{
+	return blocking_notifier_call_chain(&msm_drm_notifier_list, val,
+					    v);
+}
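+/*
+ * Example client (illustrative only; panel_blank_cb and panel_nb are
+ * hypothetical): a driver reacting to blank/unblank events registers
+ * a notifier block and inspects the event data in its callback.
+ *
+ *	static int panel_blank_cb(struct notifier_block *nb,
+ *				  unsigned long event, void *data)
+ *	{
+ *		struct msm_drm_notifier *n = data;
+ *		int blank = *(int *)n->data;
+ *
+ *		if (event == MSM_DRM_EVENT_BLANK &&
+ *		    blank == MSM_DRM_BLANK_UNBLANK)
+ *			;	// resume panel-dependent work here
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block panel_nb = {
+ *		.notifier_call = panel_blank_cb,
+ *	};
+ *
+ *	msm_drm_register_client(&panel_nb);
+ */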
+
+static inline bool _msm_seamless_for_crtc(struct drm_atomic_state *state,
+			struct drm_crtc_state *crtc_state, bool enable)
+{
+	struct drm_connector *connector = NULL;
+	struct drm_connector_state  *conn_state = NULL;
+	int i = 0;
+	int conn_cnt = 0;
+
+	if (msm_is_mode_seamless(&crtc_state->mode) ||
+		msm_is_mode_seamless_vrr(&crtc_state->adjusted_mode))
+		return true;
+
+	if (msm_is_mode_seamless_dms(&crtc_state->adjusted_mode) && !enable)
+		return true;
+
+	if (!crtc_state->mode_changed && crtc_state->connectors_changed) {
+		for_each_old_connector_in_state(state, connector,
+				conn_state, i) {
+			if ((conn_state->crtc == crtc_state->crtc) ||
+					(connector->state->crtc ==
+					 crtc_state->crtc))
+				conn_cnt++;
+
+			if (MULTIPLE_CONN_DETECTED(conn_cnt))
+				return true;
+		}
+	}
+
+	return false;
+}
+
+static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
+		struct drm_connector_state *old_conn_state, bool enable)
+{
+	if (!old_conn_state || !old_conn_state->crtc)
+		return false;
+
+	if (!old_conn_state->crtc->state->mode_changed &&
+			!old_conn_state->crtc->state->active_changed &&
+			old_conn_state->crtc->state->connectors_changed) {
+		if (old_conn_state->crtc == connector->state->crtc)
+			return true;
+	}
+
+	if (enable)
+		return false;
+
+	if (msm_is_mode_seamless(&connector->encoder->crtc->state->mode))
+		return true;
+
+	if (msm_is_mode_seamless_vrr(
+			&connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
+	if (msm_is_mode_seamless_dms(
+			&connector->encoder->crtc->state->adjusted_mode))
+		return true;
+
+	return false;
+}
+
+/* clear specified crtcs (no longer pending update) */
+static void commit_destroy(struct msm_commit *c)
+{
+	struct msm_drm_private *priv = c->dev->dev_private;
+	uint32_t crtc_mask = c->crtc_mask;
+
+	/* End_atomic */
+	spin_lock(&priv->pending_crtcs_event.lock);
+	DBG("end: %08x", crtc_mask);
+	priv->pending_crtcs &= ~crtc_mask;
+	wake_up_all_locked(&priv->pending_crtcs_event);
+	spin_unlock(&priv->pending_crtcs_event.lock);
+
+	if (c->nonblock)
+		kfree(c);
+}
+
+static void msm_atomic_wait_for_commit_done(
+		struct drm_device *dev,
 		struct drm_atomic_state *old_state)
 {
 	struct drm_crtc *crtc;
@@ -36,6 +174,362 @@
 	}
 }
 
+static void
+msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct msm_drm_notifier notifier_data;
+	int i, blank;
+
+	SDE_ATRACE_BEGIN("msm_disable");
+	for_each_old_connector_in_state(old_state, connector,
+			old_conn_state, i) {
+		const struct drm_encoder_helper_funcs *funcs;
+		struct drm_encoder *encoder;
+		struct drm_crtc_state *old_crtc_state;
+		unsigned int crtc_idx;
+
+		/*
+		 * Shut down everything that's in the changeset and currently
+		 * still on. So need to check the old, saved state.
+		 */
+		if (!old_conn_state->crtc)
+			continue;
+
+		crtc_idx = drm_crtc_index(old_conn_state->crtc);
+		old_crtc_state = drm_atomic_get_old_crtc_state(old_state,
+							old_conn_state->crtc);
+
+		if (!old_crtc_state->active ||
+		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+			continue;
+
+		encoder = old_conn_state->best_encoder;
+
+		/* We shouldn't get this far if we didn't previously have
+		 * an encoder.. but WARN_ON() rather than explode.
+		 */
+		if (WARN_ON(!encoder))
+			continue;
+
+		if (_msm_seamless_for_conn(connector, old_conn_state, false))
+			continue;
+
+		funcs = encoder->helper_private;
+
+		DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		if (connector->state->crtc &&
+			connector->state->crtc->state->active_changed) {
+			blank = MSM_DRM_BLANK_POWERDOWN;
+			notifier_data.data = &blank;
+			notifier_data.id = crtc_idx;
+			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+						     &notifier_data);
+		}
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call disable hooks twice.
+		 */
+		drm_bridge_disable(encoder->bridge);
+
+		/* Right function depends upon target state. */
+		if (connector->state->crtc && funcs->prepare)
+			funcs->prepare(encoder);
+		else if (funcs->disable)
+			funcs->disable(encoder);
+		else
+			funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+		drm_bridge_post_disable(encoder->bridge);
+		if (connector->state->crtc &&
+			connector->state->crtc->state->active_changed) {
+			DRM_DEBUG_ATOMIC("Notify blank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+						&notifier_data);
+		}
+	}
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		/* Shut down everything that needs a full modeset. */
+		if (!drm_atomic_crtc_needs_modeset(crtc->state))
+			continue;
+
+		if (!old_crtc_state->active)
+			continue;
+
+		if (_msm_seamless_for_crtc(old_state, crtc->state, false))
+			continue;
+
+		funcs = crtc->helper_private;
+
+		DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+				 crtc->base.id);
+
+		/* Right function depends upon target state. */
+		if (crtc->state->enable && funcs->prepare)
+			funcs->prepare(crtc);
+		else if (funcs->disable)
+			funcs->disable(crtc);
+		else
+			funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+	}
+	SDE_ATRACE_END("msm_disable");
+}
+
+static void
+msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	int i;
+
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		if (!crtc->state->mode_changed)
+			continue;
+
+		funcs = crtc->helper_private;
+
+		if (crtc->state->enable && funcs->mode_set_nofb) {
+			DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
+					 crtc->base.id);
+
+			funcs->mode_set_nofb(crtc);
+		}
+	}
+
+	for_each_old_connector_in_state(old_state, connector,
+			old_conn_state, i) {
+		const struct drm_encoder_helper_funcs *funcs;
+		struct drm_crtc_state *new_crtc_state;
+		struct drm_encoder *encoder;
+		struct drm_display_mode *mode, *adjusted_mode;
+
+		if (!connector->state->best_encoder)
+			continue;
+
+		encoder = connector->state->best_encoder;
+		funcs = encoder->helper_private;
+		new_crtc_state = connector->state->crtc->state;
+		mode = &new_crtc_state->mode;
+		adjusted_mode = &new_crtc_state->adjusted_mode;
+
+		if (!new_crtc_state->mode_changed &&
+				new_crtc_state->connectors_changed) {
+			if (_msm_seamless_for_conn(connector,
+					old_conn_state, false))
+				continue;
+		} else if (!new_crtc_state->mode_changed) {
+			continue;
+		}
+
+		DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call mode_set hooks twice.
+		 */
+		if (funcs->mode_set)
+			funcs->mode_set(encoder, mode, adjusted_mode);
+
+		drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+	}
+}
+
+/**
+ * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function shuts down all the outputs that need to be shut down and
+ * prepares them (if required) with the new mode.
+ *
+ * For compatibility with legacy crtc helpers this should be called before
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+		struct drm_atomic_state *old_state)
+{
+	msm_disable_outputs(dev, old_state);
+
+	drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
+
+	msm_crtc_set_mode(dev, old_state);
+}
+
+/**
+ * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy crtc helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	struct drm_crtc_state *new_crtc_state;
+	struct drm_connector *connector;
+	struct drm_connector_state *new_conn_state;
+	struct msm_drm_notifier notifier_data;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	int bridge_enable_count = 0;
+	int i, blank;
+	bool splash = false;
+
+	SDE_ATRACE_BEGIN("msm_enable");
+	for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state,
+			new_crtc_state, i) {
+		const struct drm_crtc_helper_funcs *funcs;
+
+		/* Need to filter out CRTCs where only planes change. */
+		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+			continue;
+
+		if (!new_crtc_state->active)
+			continue;
+
+		if (_msm_seamless_for_crtc(old_state, crtc->state, true))
+			continue;
+
+		funcs = crtc->helper_private;
+
+		if (crtc->state->enable) {
+			DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+					 crtc->base.id);
+
+			if (funcs->atomic_enable)
+				funcs->atomic_enable(crtc, old_crtc_state);
+			else
+				funcs->commit(crtc);
+		}
+
+		if (msm_needs_vblank_pre_modeset(
+					&new_crtc_state->adjusted_mode))
+			drm_crtc_wait_one_vblank(crtc);
+
+	}
+
+	for_each_new_connector_in_state(old_state, connector,
+			new_conn_state, i) {
+		const struct drm_encoder_helper_funcs *funcs;
+		struct drm_encoder *encoder;
+		struct drm_connector_state *old_conn_state;
+
+		if (!new_conn_state->best_encoder)
+			continue;
+
+		if (!new_conn_state->crtc->state->active ||
+				!drm_atomic_crtc_needs_modeset(
+					new_conn_state->crtc->state))
+			continue;
+
+		old_conn_state = drm_atomic_get_old_connector_state(
+				old_state, connector);
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
+		encoder = connector->state->best_encoder;
+		funcs = encoder->helper_private;
+
+		DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		if (kms && kms->funcs && kms->funcs->check_for_splash)
+			splash = kms->funcs->check_for_splash(kms);
+
+		if (splash || (connector->state->crtc &&
+			connector->state->crtc->state->active_changed)) {
+			blank = MSM_DRM_BLANK_UNBLANK;
+			notifier_data.data = &blank;
+			notifier_data.id =
+				connector->state->crtc->index;
+			DRM_DEBUG_ATOMIC("Notify early unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EARLY_EVENT_BLANK,
+					    &notifier_data);
+		}
+		/*
+		 * Each encoder has at most one connector (since we always steal
+		 * it away), so we won't call enable hooks twice.
+		 */
+		drm_bridge_pre_enable(encoder->bridge);
+		++bridge_enable_count;
+
+		if (funcs->enable)
+			funcs->enable(encoder);
+		else
+			funcs->commit(encoder);
+	}
+
+	if (kms && kms->funcs && kms->funcs->commit) {
+		DRM_DEBUG_ATOMIC("triggering commit\n");
+		kms->funcs->commit(kms, old_state);
+	}
+
+	/* If no bridges were pre_enabled, skip iterating over them again */
+	if (bridge_enable_count == 0) {
+		SDE_ATRACE_END("msm_enable");
+		return;
+	}
+
+	for_each_new_connector_in_state(old_state, connector,
+			new_conn_state, i) {
+		struct drm_encoder *encoder;
+		struct drm_connector_state *old_conn_state;
+
+		if (!new_conn_state->best_encoder)
+			continue;
+
+		if (!new_conn_state->crtc->state->active ||
+		    !drm_atomic_crtc_needs_modeset(
+				    new_conn_state->crtc->state))
+			continue;
+
+		old_conn_state = drm_atomic_get_old_connector_state(
+				old_state, connector);
+		if (_msm_seamless_for_conn(connector, old_conn_state, true))
+			continue;
+
+		encoder = connector->state->best_encoder;
+
+		DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
+				 encoder->base.id, encoder->name);
+
+		drm_bridge_enable(encoder->bridge);
+
+		if (splash || (connector->state->crtc &&
+			connector->state->crtc->state->active_changed)) {
+			DRM_DEBUG_ATOMIC("Notify unblank\n");
+			msm_drm_notifier_call_chain(MSM_DRM_EVENT_BLANK,
+					    &notifier_data);
+		}
+	}
+	SDE_ATRACE_END("msm_enable");
+}
+
 int msm_atomic_prepare_fb(struct drm_plane *plane,
 			  struct drm_plane_state *new_state)
 {
@@ -57,6 +551,295 @@
 	return msm_framebuffer_prepare(new_state->fb, kms->aspace);
 }
 
+/* The (potentially) asynchronous part of the commit.  At this point
+ * nothing can fail short of armageddon.
+ */
+static void complete_commit(struct msm_commit *c)
+{
+	struct drm_atomic_state *state = c->state;
+	struct drm_device *dev = state->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	drm_atomic_helper_wait_for_fences(dev, state, false);
+
+	kms->funcs->prepare_commit(kms, state);
+
+	msm_atomic_helper_commit_modeset_disables(dev, state);
+
+	drm_atomic_helper_commit_planes(dev, state, 0);
+
+	msm_atomic_helper_commit_modeset_enables(dev, state);
+
+	/* NOTE: _wait_for_vblanks() only waits for vblank on
+	 * enabled CRTCs.  So we end up faulting when disabling
+	 * due to (potentially) unref'ing the outgoing fb's
+	 * before the vblank when the disable has latched.
+	 *
+	 * But if it did wait on disabled (or newly disabled)
+	 * CRTCs, that would be racy (ie. we could have missed
+	 * the irq).  We need some way to poll for pipe shut
+	 * down.  Or just live with occasionally hitting the
+	 * timeout in the CRTC disable path (which really should
+	 * not be a critical path).
+	 */
+
+	msm_atomic_wait_for_commit_done(dev, state);
+
+	drm_atomic_helper_cleanup_planes(dev, state);
+
+	kms->funcs->complete_commit(kms, state);
+
+	drm_atomic_state_put(state);
+
+	commit_destroy(c);
+}
+
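+/*
+ * Kthread worker callback: runs complete_commit() on the per-CRTC display
+ * thread that msm_atomic_commit_dispatch() queued this work on.
+ */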
+static void _msm_drm_commit_work_cb(struct kthread_work *work)
+{
+	struct msm_commit *commit = NULL;
+
+	if (!work) {
+		DRM_ERROR("%s: Invalid commit work data!\n", __func__);
+		return;
+	}
+
+	commit = container_of(work, struct msm_commit, commit_work);
+
+	SDE_ATRACE_BEGIN("complete_commit");
+	complete_commit(commit);
+	SDE_ATRACE_END("complete_commit");
+}
+
+static struct msm_commit *commit_init(struct drm_atomic_state *state,
+	bool nonblock)
+{
+	struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
+
+	if (!c)
+		return NULL;
+
+	c->dev = state->dev;
+	c->state = state;
+	c->nonblock = nonblock;
+
+	kthread_init_work(&c->commit_work, _msm_drm_commit_work_cb);
+
+	return c;
+}
+
+/* Dispatch the commit work to the kthread worker of the CRTC being updated */
+static void msm_atomic_commit_dispatch(struct drm_device *dev,
+		struct drm_atomic_state *state, struct msm_commit *commit)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc = NULL;
+	struct drm_crtc_state *crtc_state = NULL;
+	int ret = -EINVAL, i = 0, j = 0;
+	bool nonblock;
+
+	/* cache since work will kfree commit in non-blocking case */
+	nonblock = commit->nonblock;
+
+	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
+		for (j = 0; j < priv->num_crtcs; j++) {
+			if (priv->disp_thread[j].crtc_id ==
+						crtc->base.id) {
+				if (priv->disp_thread[j].thread) {
+					kthread_queue_work(
+						&priv->disp_thread[j].worker,
+							&commit->commit_work);
+					/* only return zero if work is
+					 * queued successfully.
+					 */
+					ret = 0;
+				} else {
+					DRM_ERROR("no display thread for crtc_id: %d\n",
+						priv->disp_thread[j].crtc_id);
+				}
+				break;
+			}
+		}
+		/*
+		 * TODO: handle cases where there will be more than
+		 * one crtc per commit cycle. Remove this check then.
+		 * Current assumption is there will be only one crtc
+		 * per commit cycle.
+		 */
+		if (j < priv->num_crtcs)
+			break;
+	}
+
+	if (ret) {
+		/*
+		 * This is not expected to happen, but at this point the state
+		 * has been swapped and we couldn't dispatch to a crtc thread.
+		 * Fall back now to a synchronous complete_commit to try and
+		 * ensure that SW and HW state don't get out of sync.
+		 */
+		DRM_ERROR("failed to dispatch commit to any CRTC\n");
+		complete_commit(commit);
+	} else if (!nonblock) {
+		kthread_flush_work(&commit->commit_work);
+	}
+
+	/*
+	 * Free blocking commits here after processing; nonblocking commits
+	 * are freed from the commit work itself.
+	 */
+	if (!nonblock)
+		kfree(commit);
+}
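+
+/*
+ * Roughly, the commit flow implemented above and below is:
+ *
+ *   msm_atomic_commit()
+ *     -> swap the state and mark the affected crtcs as pending
+ *     -> msm_atomic_commit_dispatch()  queue onto the crtc_commit:<id> kthread
+ *        -> complete_commit()          disables, plane commit, enables, cleanup
+ *           -> commit_destroy()        release the msm_commit
+ */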
+
+/**
+ * msm_atomic_commit - commit validated state object
+ * @dev: DRM device
+ * @state: the driver state object
+ * @nonblock: nonblocking commit
+ *
+ * This function commits a state object that has been pre-validated with
+ * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
+ * reservation fails.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int msm_atomic_commit(struct drm_device *dev,
+		struct drm_atomic_state *state, bool nonblock)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_commit *c;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+	struct drm_plane *plane;
+	struct drm_plane_state *old_plane_state, *new_plane_state;
+	int i, ret;
+
+	if (!priv || priv->shutdown_in_progress) {
+		DRM_ERROR("priv is null or shutdown is in progress\n");
+		return -EINVAL;
+	}
+
+	SDE_ATRACE_BEGIN("atomic_commit");
+	ret = drm_atomic_helper_prepare_planes(dev, state);
+	if (ret) {
+		SDE_ATRACE_END("atomic_commit");
+		return ret;
+	}
+
+	c = commit_init(state, nonblock);
+	if (!c) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	/*
+	 * Figure out what crtcs we have:
+	 */
+	for_each_new_crtc_in_state(state, crtc, crtc_state, i)
+		c->crtc_mask |= drm_crtc_mask(crtc);
+
+	/*
+	 * Figure out what fence to wait for:
+	 */
+	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+			new_plane_state, i) {
+		if ((new_plane_state->fb != old_plane_state->fb)
+				&& new_plane_state->fb) {
+			struct drm_gem_object *obj =
+				msm_framebuffer_bo(new_plane_state->fb, 0);
+			struct msm_gem_object *msm_obj = to_msm_bo(obj);
+			struct dma_fence *fence =
+				reservation_object_get_excl_rcu(msm_obj->resv);
+
+			drm_atomic_set_fence_for_plane(new_plane_state, fence);
+		}
+	}
+
+	/*
+	 * Wait for pending updates on any of the same crtc's and then
+	 * mark our set of crtc's as busy:
+	 */
+
+	/* start atomic: block until none of our crtcs has a commit pending */
+	spin_lock(&priv->pending_crtcs_event.lock);
+	ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+			!(priv->pending_crtcs & c->crtc_mask));
+	if (ret == 0) {
+		DBG("start: %08x", c->crtc_mask);
+		priv->pending_crtcs |= c->crtc_mask;
+	}
+	spin_unlock(&priv->pending_crtcs_event.lock);
+
+	if (ret)
+		goto err_free;
+
+	WARN_ON(drm_atomic_helper_swap_state(state, false) < 0);
+
+	/*
+	 * Provide the driver a chance to prepare for output fences. This is
+	 * done after the point of no return, but before asynchronous commits
+	 * are dispatched to work queues, so that the fence preparation is
+	 * finished before the .atomic_commit returns.
+	 */
+	if (priv && priv->kms && priv->kms->funcs &&
+			priv->kms->funcs->prepare_fence)
+		priv->kms->funcs->prepare_fence(priv->kms, state);
+
+	/*
+	 * Everything below can be run asynchronously without the need to grab
+	 * any modeset locks at all under one condition: It must be guaranteed
+	 * that the asynchronous work has either been cancelled (if the driver
+	 * supports it, which at least requires that the framebuffers get
+	 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
+	 * before the new state gets committed on the software side with
+	 * drm_atomic_helper_swap_state().
+	 *
+	 * This scheme allows new atomic state updates to be prepared and
+	 * checked in parallel to the asynchronous completion of the previous
+	 * update. Which is important since compositors need to figure out the
+	 * composition of the next frame right after having submitted the
+	 * current layout.
+	 */
+
+	drm_atomic_state_get(state);
+	msm_atomic_commit_dispatch(dev, state, c);
+
+	SDE_ATRACE_END("atomic_commit");
+
+	return 0;
+err_free:
+	kfree(c);
+error:
+	drm_atomic_helper_cleanup_planes(dev, state);
+	SDE_ATRACE_END("atomic_commit");
+	return ret;
+}
+
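+/*
+ * Custom atomic state hooks: wrap drm_atomic_state in an msm_kms_state so
+ * that kms backends can attach private state (the ->state pointer freed in
+ * the clear/free callbacks below).
+ */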
+struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev)
+{
+	struct msm_kms_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
+		kfree(state);
+		return NULL;
+	}
+
+	return &state->base;
+}
+
+void msm_atomic_state_clear(struct drm_atomic_state *s)
+{
+	struct msm_kms_state *state = to_kms_state(s);
+
+	drm_atomic_state_default_clear(&state->base);
+	kfree(state->state);
+	state->state = NULL;
+}
+
+void msm_atomic_state_free(struct drm_atomic_state *state)
+{
+	kfree(to_kms_state(state)->state);
+	drm_atomic_state_default_release(state);
+	kfree(state);
+}
+
 void msm_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index be0ff35..5b0b9ff 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -14,7 +15,31 @@
  * You should have received a copy of the GNU General Public License along with
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
 
+#include <linux/of_address.h>
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
 #include <drm/drm_of.h>
 
 #include "msm_drv.h"
@@ -22,7 +47,8 @@
 #include "msm_fence.h"
 #include "msm_gpu.h"
 #include "msm_kms.h"
-
+#include "msm_mmu.h"
+#include "sde_wb.h"
 
 /*
  * MSM driver version:
@@ -37,11 +63,58 @@
 #define MSM_VERSION_MINOR	3
 #define MSM_VERSION_PATCHLEVEL	0
 
+static void msm_fb_output_poll_changed(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = NULL;
+
+	if (!dev) {
+		DRM_ERROR("output_poll_changed failed, invalid input\n");
+		return;
+	}
+
+	priv = dev->dev_private;
+
+	if (priv->fbdev)
+		drm_fb_helper_hotplug_event(priv->fbdev);
+}
+
+/**
+ * msm_atomic_check - validate state object
+ * @dev: DRM device
+ * @state: the driver state object
+ *
+ * This is a wrapper around drm_atomic_helper_check that performs the modeset
+ * and plane state checks. Additionally it checks whether any secure
+ * transition (moving CRTCs and planes between secure and non-secure states
+ * and vice versa) is allowed. When going to a secure state, only planes with
+ * fb_mode set to dir-translated may be staged on the CRTC, and only one CRTC
+ * should be active.
+ * Mixing of secure and non-secure planes is also not allowed.
+ *
+ * RETURNS
+ * Zero for success or -errno.
+ */
+int msm_atomic_check(struct drm_device *dev,
+			    struct drm_atomic_state *state)
+{
+	struct msm_drm_private *priv;
+
+	priv = dev->dev_private;
+	if (priv && priv->kms && priv->kms->funcs &&
+			priv->kms->funcs->atomic_check)
+		return priv->kms->funcs->atomic_check(priv->kms, state);
+
+	return drm_atomic_helper_check(dev, state);
+}
+
 static const struct drm_mode_config_funcs mode_config_funcs = {
 	.fb_create = msm_framebuffer_create,
-	.output_poll_changed = drm_fb_helper_output_poll_changed,
-	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = drm_atomic_helper_commit,
+	.output_poll_changed = msm_fb_output_poll_changed,
+	.atomic_check = msm_atomic_check,
+	.atomic_commit = msm_atomic_commit,
+	.atomic_state_alloc = msm_atomic_state_alloc,
+	.atomic_state_clear = msm_atomic_state_clear,
+	.atomic_state_free = msm_atomic_state_free,
 };
 
 static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
@@ -180,23 +253,48 @@
 	}
 
 	if (reglog)
-		printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
+		dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
+			dbgname, ptr, size);
 
 	return ptr;
 }
 
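+/*
+ * Return the size of the named MEM resource (or of the first MEM resource
+ * when name is NULL); returns 0 if the resource cannot be found.
+ */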
+unsigned long msm_iomap_size(struct platform_device *pdev, const char *name)
+{
+	struct resource *res;
+
+	if (name)
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	else
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		dev_err(&pdev->dev, "failed to get memory resource: %s\n",
+									name);
+		return 0;
+	}
+
+	return resource_size(res);
+}
+
+void msm_iounmap(struct platform_device *pdev, void __iomem *addr)
+{
+	devm_iounmap(&pdev->dev, addr);
+}
+
 void msm_writel(u32 data, void __iomem *addr)
 {
 	if (reglog)
-		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+		pr_debug("IO:W %pK %08x\n", addr, data);
 	writel(data, addr);
 }
 
 u32 msm_readl(const void __iomem *addr)
 {
 	u32 val = readl(addr);
+
 	if (reglog)
-		pr_err("IO:R %p %08x\n", addr, val);
+		pr_err("IO:R %pK %08x\n", addr, val);
 	return val;
 }
 
@@ -206,7 +304,7 @@
 	bool enable;
 };
 
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
 {
 	struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
 						struct msm_vblank_ctrl, work);
@@ -215,12 +313,16 @@
 	struct msm_kms *kms = priv->kms;
 	struct vblank_event *vbl_ev, *tmp;
 	unsigned long flags;
+	LIST_HEAD(tmp_head);
 
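+	/*
+	 * Move all pending events onto a local list under the lock, then
+	 * process them after dropping it, so the kms enable/disable hooks
+	 * are not called with the spinlock held.
+	 */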
 	spin_lock_irqsave(&vbl_ctrl->lock, flags);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
-		spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+		list_add_tail(&vbl_ev->node, &tmp_head);
+	}
+	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
+	list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
 		if (vbl_ev->enable)
 			kms->funcs->enable_vblank(kms,
 						priv->crtcs[vbl_ev->crtc_id]);
@@ -229,11 +331,7 @@
 						priv->crtcs[vbl_ev->crtc_id]);
 
 		kfree(vbl_ev);
-
-		spin_lock_irqsave(&vbl_ctrl->lock, flags);
 	}
-
-	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 }
 
 static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
@@ -254,7 +352,8 @@
 	list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
 	spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-	queue_work(priv->wq, &vbl_ctrl->work);
+	kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+			&vbl_ctrl->work);
 
 	return 0;
 }
@@ -265,24 +364,46 @@
 	struct drm_device *ddev = platform_get_drvdata(pdev);
 	struct msm_drm_private *priv = ddev->dev_private;
 	struct msm_kms *kms = priv->kms;
+	struct msm_gpu *gpu = priv->gpu;
 	struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
 	struct vblank_event *vbl_ev, *tmp;
+	int i;
 
 	/* We must cancel and cleanup any pending vblank enable/disable
 	 * work before drm_irq_uninstall() to avoid work re-enabling an
 	 * irq after uninstall has disabled it.
 	 */
-	cancel_work_sync(&vbl_ctrl->work);
+	kthread_flush_work(&vbl_ctrl->work);
 	list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
 		list_del(&vbl_ev->node);
 		kfree(vbl_ev);
 	}
 
+	/* clean up display commit/event worker threads */
+	for (i = 0; i < priv->num_crtcs; i++) {
+		if (priv->disp_thread[i].thread) {
+			kthread_flush_worker(&priv->disp_thread[i].worker);
+			kthread_stop(priv->disp_thread[i].thread);
+			priv->disp_thread[i].thread = NULL;
+		}
+
+		if (priv->event_thread[i].thread) {
+			kthread_flush_worker(&priv->event_thread[i].worker);
+			kthread_stop(priv->event_thread[i].thread);
+			priv->event_thread[i].thread = NULL;
+		}
+	}
+
 	msm_gem_shrinker_cleanup(ddev);
 
 	drm_kms_helper_poll_fini(ddev);
 
-	drm_dev_unregister(ddev);
+	drm_mode_config_cleanup(ddev);
+
+	if (priv->registered) {
+		drm_dev_unregister(ddev);
+		priv->registered = false;
+	}
 
 	msm_perf_debugfs_cleanup(priv);
 	msm_rd_debugfs_cleanup(priv);
@@ -300,12 +421,18 @@
 	flush_workqueue(priv->wq);
 	destroy_workqueue(priv->wq);
 
-	flush_workqueue(priv->atomic_wq);
-	destroy_workqueue(priv->atomic_wq);
-
 	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
 
+	if (gpu) {
+		mutex_lock(&ddev->struct_mutex);
+		// XXX what do we do here?
+		//pm_runtime_enable(&pdev->dev);
+		gpu->funcs->pm_suspend(gpu);
+		mutex_unlock(&ddev->struct_mutex);
+		gpu->funcs->destroy(gpu);
+	}
+
 	if (priv->vram.paddr) {
 		unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
 		drm_mm_takedown(&priv->vram.mm);
@@ -315,28 +442,49 @@
 
 	component_unbind_all(dev, ddev);
 
+	sde_dbg_destroy();
+	debugfs_remove_recursive(priv->debug_root);
+
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
+	sde_power_resource_deinit(pdev, &priv->phandle);
+
 	msm_mdss_destroy(ddev);
 
 	ddev->dev_private = NULL;
-	drm_dev_unref(ddev);
-
 	kfree(priv);
 
+	drm_dev_put(ddev);
+
 	return 0;
 }
 
 #define KMS_MDP4 4
 #define KMS_MDP5 5
+#define KMS_SDE  3
 
 static int get_mdp_ver(struct platform_device *pdev)
 {
+#ifdef CONFIG_OF
+	static const struct of_device_id match_types[] = { {
+		.compatible = "qcom,mdss_mdp",
+		.data	= (void	*)KMS_MDP5,
+	},
+	{
+		.compatible = "qcom,sde-kms",
+		.data	= (void	*)KMS_SDE,
+	},
+	{},
+	};
 	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
 
-	return (int) (unsigned long) of_device_get_match_data(dev);
+	match = of_match_node(match_types, dev->of_node);
+	if (match)
+		return (int)(unsigned long)match->data;
+#endif
+	return KMS_MDP4;
 }
 
-#include <linux/of_address.h>
-
 static int msm_init_vram(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -411,71 +559,48 @@
 	return ret;
 }
 
-static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+#ifdef CONFIG_OF
+static int msm_component_bind_all(struct device *dev,
+				struct drm_device *drm_dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct drm_device *ddev;
-	struct msm_drm_private *priv;
-	struct msm_kms *kms;
 	int ret;
 
-	ddev = drm_dev_alloc(drv, dev);
-	if (IS_ERR(ddev)) {
-		dev_err(dev, "failed to allocate drm_device\n");
-		return PTR_ERR(ddev);
-	}
-
-	platform_set_drvdata(pdev, ddev);
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv) {
-		drm_dev_unref(ddev);
-		return -ENOMEM;
-	}
-
-	ddev->dev_private = priv;
-	priv->dev = ddev;
-
-	ret = msm_mdss_init(ddev);
-	if (ret) {
-		kfree(priv);
-		drm_dev_unref(ddev);
-		return ret;
-	}
-
-	priv->wq = alloc_ordered_workqueue("msm", 0);
-	priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
-
-	INIT_LIST_HEAD(&priv->inactive_list);
-	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-	INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
-	spin_lock_init(&priv->vblank_ctrl.lock);
-
-	drm_mode_config_init(ddev);
-
-	/* Bind all our sub-components: */
-	ret = component_bind_all(dev, ddev);
-	if (ret) {
-		msm_mdss_destroy(ddev);
-		kfree(priv);
-		drm_dev_unref(ddev);
-		return ret;
-	}
-
-	ret = msm_init_vram(ddev);
+	ret = component_bind_all(dev, drm_dev);
 	if (ret)
-		goto fail;
+		DRM_ERROR("component_bind_all failed: %d\n", ret);
 
-	msm_gem_shrinker_init(ddev);
+	return ret;
+}
+#else
+static int msm_component_bind_all(struct device *dev,
+				struct drm_device *drm_dev)
+{
+	return 0;
+}
+#endif
+
+static int msm_power_enable_wrapper(void *handle, void *client, bool enable)
+{
+	return sde_power_resource_enable(handle, client, enable);
+}
+
+static int _msm_drm_init_helper(struct msm_drm_private *priv,
+	struct sched_param param, struct drm_device *ddev,
+	struct device *dev, struct platform_device *pdev,
+	struct msm_kms *kms)
+{
+	int i, ret = 0;
 
 	switch (get_mdp_ver(pdev)) {
 	case KMS_MDP4:
 		kms = mdp4_kms_init(ddev);
-		priv->kms = kms;
 		break;
 	case KMS_MDP5:
 		kms = mdp5_kms_init(ddev);
 		break;
+	case KMS_SDE:
+		kms = sde_kms_init(ddev);
+		break;
 	default:
 		kms = ERR_PTR(-ENODEV);
 		break;
@@ -488,22 +613,210 @@
 		 * and (for example) use dmabuf/prime to share buffers with
 		 * imx drm driver on iMX5
 		 */
+		priv->kms = NULL;
 		dev_err(dev, "failed to load kms\n");
-		ret = PTR_ERR(kms);
-		goto fail;
+		return PTR_ERR(kms);
 	}
+	priv->kms = kms;
+	pm_runtime_enable(dev);
+
+	/*
+	 * Since kms->funcs->hw_init(kms) might call
+	 * drm_object_property_set_value() to initialize some custom
+	 * properties, we need to make sure mode_config.funcs is populated
+	 * beforehand to avoid dereferencing an unset value during the
+	 * drm_drv_uses_atomic_modeset() check.
+	 */
+	ddev->mode_config.funcs = &mode_config_funcs;
 
 	if (kms) {
 		ret = kms->funcs->hw_init(kms);
 		if (ret) {
 			dev_err(dev, "kms hw init failed: %d\n", ret);
-			goto fail;
+			return ret;
 		}
 	}
 
+	/*
+	 * This priority was found through empirical testing to provide
+	 * appropriate realtime scheduling for processing display updates
+	 * while still interacting well with other realtime and normal
+	 * priority tasks.
+	 */
+	param.sched_priority = 16;
+	for (i = 0; i < priv->num_crtcs; i++) {
+
+		/* initialize display thread */
+		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		kthread_init_worker(&priv->disp_thread[i].worker);
+		priv->disp_thread[i].dev = ddev;
+		priv->disp_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->disp_thread[i].worker,
+				"crtc_commit:%d", priv->disp_thread[i].crtc_id);
+		if (IS_ERR(priv->disp_thread[i].thread)) {
+			dev_err(dev, "failed to create crtc_commit kthread\n");
+			priv->disp_thread[i].thread = NULL;
+		} else {
+			ret = sched_setscheduler(priv->disp_thread[i].thread,
+						 SCHED_FIFO, &param);
+			if (ret)
+				pr_warn("display thread priority update failed: %d\n",
+					ret);
+		}
+
+		/* initialize event thread */
+		priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		kthread_init_worker(&priv->event_thread[i].worker);
+		priv->event_thread[i].dev = ddev;
+		priv->event_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->event_thread[i].worker,
+				"crtc_event:%d", priv->event_thread[i].crtc_id);
+		/*
+		 * The event thread should run at the same priority as the
+		 * disp_thread because it handles frame_done events. A lower
+		 * priority event thread and a higher priority disp_thread can
+		 * cause frame_pending counters to go beyond 2, which can lead
+		 * to commit failures at the crtc commit level.
+		 */
+		if (IS_ERR(priv->event_thread[i].thread)) {
+			dev_err(dev, "failed to create crtc_event kthread\n");
+			priv->event_thread[i].thread = NULL;
+		} else {
+			ret = sched_setscheduler(priv->event_thread[i].thread,
+						 SCHED_FIFO, &param);
+			if (ret)
+				pr_warn("display event thread priority update failed: %d\n",
+					ret);
+		}
+
+		if ((!priv->disp_thread[i].thread) ||
+				!priv->event_thread[i].thread) {
+			/* clean up previously created threads if any */
+			for ( ; i >= 0; i--) {
+				if (priv->disp_thread[i].thread) {
+					kthread_stop(
+						priv->disp_thread[i].thread);
+					priv->disp_thread[i].thread = NULL;
+				}
+
+				if (priv->event_thread[i].thread) {
+					kthread_stop(
+						priv->event_thread[i].thread);
+					priv->event_thread[i].thread = NULL;
+				}
+			}
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * Since the pp interrupt is heavyweight, queue its work on a
+	 * dedicated worker thread so that it does not delay other
+	 * important events.
+	 */
+	kthread_init_worker(&priv->pp_event_worker);
+	priv->pp_event_thread = kthread_run(kthread_worker_fn,
+			&priv->pp_event_worker, "pp_event");
+	if (IS_ERR(priv->pp_event_thread)) {
+		dev_err(dev, "failed to create pp_event kthread\n");
+		ret = PTR_ERR(priv->pp_event_thread);
+		priv->pp_event_thread = NULL;
+		return ret;
+	}
+
+	ret = sched_setscheduler(priv->pp_event_thread,
+						SCHED_FIFO, &param);
+	if (ret)
+		pr_warn("pp_event thread priority update failed: %d\n",
+								ret);
+
+	return 0;
+}
+
+static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct drm_device *ddev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms = NULL;
+	struct sde_dbg_power_ctrl dbg_power_ctrl = { 0 };
+	int ret;
+	struct sched_param param = { 0 };
+
+	ddev = drm_dev_alloc(drv, dev);
+	if (IS_ERR(ddev)) {
+		dev_err(dev, "failed to allocate drm_device\n");
+		return PTR_ERR(ddev);
+	}
+
+	drm_mode_config_init(ddev);
+	platform_set_drvdata(pdev, ddev);
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv) {
+		ret = -ENOMEM;
+		goto priv_alloc_fail;
+	}
+
+	ddev->dev_private = priv;
+	priv->dev = ddev;
+
+	ret = msm_mdss_init(ddev);
+	if (ret)
+		goto mdss_init_fail;
+
+	priv->wq = alloc_ordered_workqueue("msm_drm", 0);
+	init_waitqueue_head(&priv->pending_crtcs_event);
+
+	INIT_LIST_HEAD(&priv->client_event_list);
+	INIT_LIST_HEAD(&priv->inactive_list);
+	INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
+	kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+	spin_lock_init(&priv->vblank_ctrl.lock);
+
+	ret = sde_power_resource_init(pdev, &priv->phandle);
+	if (ret) {
+		pr_err("sde power resource init failed\n");
+		goto power_init_fail;
+	}
+
+	priv->pclient = sde_power_client_create(&priv->phandle, "sde");
+	if (IS_ERR_OR_NULL(priv->pclient)) {
+		pr_err("sde power client create failed\n");
+		ret = -EINVAL;
+		goto power_client_fail;
+	}
+
+	dbg_power_ctrl.handle = &priv->phandle;
+	dbg_power_ctrl.client = priv->pclient;
+	dbg_power_ctrl.enable_fn = msm_power_enable_wrapper;
+	ret = sde_dbg_init(&pdev->dev, &dbg_power_ctrl);
+	if (ret) {
+		dev_err(dev, "failed to init sde dbg: %d\n", ret);
+		goto dbg_init_fail;
+	}
+
+	/* Bind all our sub-components: */
+	ret = msm_component_bind_all(dev, ddev);
+	if (ret)
+		goto bind_fail;
+
+	ret = msm_init_vram(ddev);
+	if (ret)
+		goto fail;
+
 	ddev->mode_config.funcs = &mode_config_funcs;
 	ddev->mode_config.helper_private = &mode_config_helper_funcs;
 
+	ret = _msm_drm_init_helper(priv, param, ddev, dev, pdev, kms);
+	if (ret) {
+		dev_err(dev, "msm_drm_init_helper failed\n");
+		goto fail;
+	}
+
+	/* the helper stores the created kms in priv; refresh the local copy */
+	kms = priv->kms;
+
 	ret = drm_vblank_init(ddev, priv->num_crtcs);
 	if (ret < 0) {
 		dev_err(dev, "failed to initialize vblank\n");
@@ -512,7 +825,7 @@
 
 	if (kms) {
 		pm_runtime_get_sync(dev);
-		ret = drm_irq_install(ddev, kms->irq);
+		ret = drm_irq_install(ddev, platform_get_irq(pdev, 0));
 		pm_runtime_put_sync(dev);
 		if (ret < 0) {
 			dev_err(dev, "failed to install IRQ handler\n");
@@ -523,9 +836,18 @@
 	ret = drm_dev_register(ddev, 0);
 	if (ret)
 		goto fail;
+	priv->registered = true;
 
 	drm_mode_config_reset(ddev);
 
+	if (kms && kms->funcs && kms->funcs->cont_splash_config) {
+		ret = kms->funcs->cont_splash_config(kms);
+		if (ret) {
+			dev_err(dev, "kms cont_splash config failed.\n");
+			goto fail;
+		}
+	}
+
 #ifdef CONFIG_DRM_FBDEV_EMULATION
 	if (fbdev)
 		priv->fbdev = msm_fbdev_init(ddev);
@@ -535,6 +857,30 @@
 	if (ret)
 		goto fail;
 
+	priv->debug_root = debugfs_create_dir("debug",
+					ddev->primary->debugfs_root);
+	if (IS_ERR_OR_NULL(priv->debug_root)) {
+		pr_err("debugfs create_dir failed, error %ld\n",
+		       PTR_ERR(priv->debug_root));
+		priv->debug_root = NULL;
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	ret = sde_dbg_debugfs_register(priv->debug_root);
+	if (ret) {
+		dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret);
+		goto fail;
+	}
+
+	/* perform subdriver post initialization */
+	if (kms && kms->funcs && kms->funcs->postinit) {
+		ret = kms->funcs->postinit(kms);
+		if (ret) {
+			pr_err("kms post init failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
 	drm_kms_helper_poll_init(ddev);
 
 	return 0;
@@ -542,12 +888,30 @@
 fail:
 	msm_drm_uninit(dev);
 	return ret;
+bind_fail:
+	sde_dbg_destroy();
+dbg_init_fail:
+	sde_power_client_destroy(&priv->phandle, priv->pclient);
+power_client_fail:
+	sde_power_resource_deinit(pdev, &priv->phandle);
+power_init_fail:
+	msm_mdss_destroy(ddev);
+mdss_init_fail:
+	kfree(priv);
+priv_alloc_fail:
+	drm_dev_put(ddev);
+	return ret;
 }
 
 /*
  * DRM operations:
  */
 
+#ifdef CONFIG_QCOM_KGSL
+static void load_gpu(struct drm_device *dev)
+{
+}
+#else
 static void load_gpu(struct drm_device *dev)
 {
 	static DEFINE_MUTEX(init_lock);
@@ -560,6 +924,7 @@
 
 	mutex_unlock(&init_lock);
 }
+#endif
 
 static int context_init(struct drm_device *dev, struct drm_file *file)
 {
@@ -573,6 +938,15 @@
 
 	file->driver_priv = ctx;
 
+	if (dev && dev->dev_private) {
+		struct msm_drm_private *priv = dev->dev_private;
+		struct msm_kms *kms;
+
+		kms = priv->kms;
+		if (kms && kms->funcs && kms->funcs->postopen)
+			kms->funcs->postopen(kms, file);
+	}
+
 	return 0;
 }
 
@@ -592,10 +966,23 @@
 	kfree(ctx);
 }
 
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->preclose)
+		kms->funcs->preclose(kms, file);
+}
+
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->postclose)
+		kms->funcs->postclose(kms, file);
 
 	mutex_lock(&dev->struct_mutex);
 	if (ctx == priv->lastctx)
@@ -605,6 +992,154 @@
 	context_close(ctx);
 }
 
+static int msm_disable_all_modes_commit(
+		struct drm_device *dev,
+		struct drm_atomic_state *state)
+{
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	unsigned int plane_mask;
+	int ret;
+
+	plane_mask = 0;
+	drm_for_each_plane(plane, dev) {
+		struct drm_plane_state *plane_state;
+
+		plane_state = drm_atomic_get_plane_state(state, plane);
+		if (IS_ERR(plane_state)) {
+			ret = PTR_ERR(plane_state);
+			goto fail;
+		}
+
+		plane_state->rotation = 0;
+
+		plane->old_fb = plane->fb;
+		plane_mask |= 1 << drm_plane_index(plane);
+
+		/* disable non-primary: */
+		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+			continue;
+
+		DRM_DEBUG("disabling plane %d\n", plane->base.id);
+
+		ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling plane %d\n", ret,
+					plane->base.id);
+	}
+
+	drm_for_each_crtc(crtc, dev) {
+		struct drm_mode_set mode_set;
+
+		memset(&mode_set, 0, sizeof(struct drm_mode_set));
+		mode_set.crtc = crtc;
+
+		DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
+
+		ret = __drm_atomic_helper_set_config(&mode_set, state);
+		if (ret != 0)
+			DRM_ERROR("error %d disabling crtc %d\n", ret,
+					crtc->base.id);
+	}
+
+	DRM_DEBUG("committing disables\n");
+	ret = drm_atomic_commit(state);
+
+fail:
+	DRM_DEBUG("disables result %d\n", ret);
+	return ret;
+}
+
+/**
+ * msm_disable_all_modes - disables all planes and crtcs via an atomic commit
+ *	based on restore_fbdev_mode_atomic in drm_fb_helper.c
+ * @dev: device pointer
+ * @ctx: modeset acquire context
+ * Return: 0 on success, otherwise -error
+ */
+static int msm_disable_all_modes(
+		struct drm_device *dev,
+		struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_atomic_state *state;
+	int ret, i;
+
+	state = drm_atomic_state_alloc(dev);
+	if (!state)
+		return -ENOMEM;
+
+	state->acquire_ctx = ctx;
+
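+	/* retry a bounded number of times if the commit hits a deadlock */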
+	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
+		ret = msm_disable_all_modes_commit(dev, state);
+		if (ret != -EDEADLK && ret != -ERESTARTSYS)
+			break;
+		drm_atomic_state_clear(state);
+		drm_modeset_backoff(ctx);
+	}
+
+	drm_atomic_state_put(state);
+
+	return ret;
+}
+
+static void msm_lastclose(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	struct drm_modeset_acquire_ctx ctx;
+	int i, rc;
+
+	/*
+	 * Check for splash status before triggering cleanup: if we end up
+	 * here with splash status ON, i.e. before the first commit, then
+	 * ignore the lastclose call.
+	 */
+	if (kms && kms->funcs && kms->funcs->check_for_splash
+		&& kms->funcs->check_for_splash(kms))
+		return;
+
+	/*
+	 * clean up vblank disable immediately as this is the last close.
+	 */
+	for (i = 0; i < dev->num_crtcs; i++) {
+		struct drm_vblank_crtc *vblank = &dev->vblank[i];
+		struct timer_list *disable_timer = &vblank->disable_timer;
+
+		if (del_timer_sync(disable_timer))
+			disable_timer->function(disable_timer);
+	}
+
+	/* wait for pending vblank requests to be executed by worker thread */
+	flush_workqueue(priv->wq);
+
+	if (priv->fbdev) {
+		drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+		return;
+	}
+
+	drm_modeset_acquire_init(&ctx, 0);
+retry:
+	rc = drm_modeset_lock_all_ctx(dev, &ctx);
+	if (rc)
+		goto fail;
+
+	rc = msm_disable_all_modes(dev, &ctx);
+	if (rc)
+		goto fail;
+
+	if (kms && kms->funcs && kms->funcs->lastclose)
+		kms->funcs->lastclose(kms, &ctx);
+
+fail:
+	if (rc == -EDEADLK) {
+		drm_modeset_backoff(&ctx);
+		goto retry;
+	} else if (rc) {
+		pr_err("last close failed: %d\n", rc);
+	}
+	drm_modeset_drop_locks(&ctx);
+	drm_modeset_acquire_fini(&ctx);
+}
+
 static irqreturn_t msm_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -851,6 +1386,328 @@
 	return ret;
 }
 
+static int msm_drm_object_supports_event(struct drm_device *dev,
+		struct drm_msm_event_req *req)
+{
+	int ret = -EINVAL;
+	struct drm_mode_object *arg_obj;
+
+	arg_obj = drm_mode_object_find(dev, NULL, req->object_id,
+				req->object_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	switch (arg_obj->type) {
+	case DRM_MODE_OBJECT_CRTC:
+	case DRM_MODE_OBJECT_CONNECTOR:
+		ret = 0;
+		break;
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	drm_mode_object_put(arg_obj);
+
+	return ret;
+}
+
+static int msm_register_event(struct drm_device *dev,
+	struct drm_msm_event_req *req, struct drm_file *file, bool en)
+{
+	int ret = -EINVAL;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+	struct drm_mode_object *arg_obj;
+
+	arg_obj = drm_mode_object_find(dev, file, req->object_id,
+				req->object_type);
+	if (!arg_obj)
+		return -ENOENT;
+
+	if (kms && kms->funcs && kms->funcs->register_events)
+		ret = kms->funcs->register_events(kms, arg_obj, req->event, en);
+
+	drm_mode_object_put(arg_obj);
+
+	return ret;
+}
+
+static int msm_event_client_count(struct drm_device *dev,
+		struct drm_msm_event_req *req_event, bool locked)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned long flag = 0;
+	struct msm_drm_event *node;
+	int count = 0;
+
+	if (!locked)
+		spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->event.type == req_event->event &&
+			node->info.object_id == req_event->object_id)
+			count++;
+	}
+	if (!locked)
+		spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	return count;
+}
+
+static int msm_ioctl_register_event(struct drm_device *dev, void *data,
+				    struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_event_req *req_event = data;
+	struct msm_drm_event *client, *node;
+	unsigned long flag = 0;
+	bool dup_request = false;
+	int ret = 0, count = 0;
+
+	ret = msm_drm_object_supports_event(dev, req_event);
+	if (ret) {
+		DRM_ERROR("unsupported event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->base.file_priv != file)
+			continue;
+		if (node->event.type == req_event->event &&
+			node->info.object_id == req_event->object_id) {
+			DRM_DEBUG("duplicate request for event %x obj id %d\n",
+				node->event.type, node->info.object_id);
+			dup_request = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	if (dup_request)
+		return -EALREADY;
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		return -ENOMEM;
+
+	client->base.file_priv = file;
+	client->base.event = &client->event;
+	client->event.type = req_event->event;
+	memcpy(&client->info, req_event, sizeof(client->info));
+
+	/* Get the count of clients that have registered for event.
+	 * Event should be enabled for first client, for subsequent enable
+	 * calls add to client list and return.
+	 */
+	count = msm_event_client_count(dev, req_event, false);
+	/* Add current client to list */
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_add_tail(&client->base.link, &priv->client_event_list);
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	if (count)
+		return 0;
+
+	ret = msm_register_event(dev, req_event, file, true);
+	if (ret) {
+		DRM_ERROR("failed to enable event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		spin_lock_irqsave(&dev->event_lock, flag);
+		list_del(&client->base.link);
+		spin_unlock_irqrestore(&dev->event_lock, flag);
+		kfree(client);
+	}
+	return ret;
+}
+
+static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
+				      struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_msm_event_req *req_event = data;
+	struct msm_drm_event *client = NULL, *node, *temp;
+	unsigned long flag = 0;
+	int count = 0;
+	bool found = false;
+	int ret = 0;
+
+	ret = msm_drm_object_supports_event(dev, req_event);
+	if (ret) {
+		DRM_ERROR("unsupported event %x object %x object id %d\n",
+			req_event->event, req_event->object_type,
+			req_event->object_id);
+		return ret;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flag);
+	list_for_each_entry_safe(node, temp, &priv->client_event_list,
+			base.link) {
+		if (node->event.type == req_event->event &&
+		    node->info.object_id == req_event->object_id &&
+		    node->base.file_priv == file) {
+			client = node;
+			list_del(&client->base.link);
+			found = true;
+			kfree(client);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flag);
+
+	if (!found)
+		return -ENOENT;
+
+	count = msm_event_client_count(dev, req_event, false);
+	if (!count)
+		ret = msm_register_event(dev, req_event, file, false);
+
+	return ret;
+}
+
+void msm_mode_object_event_notify(struct drm_mode_object *obj,
+		struct drm_device *dev, struct drm_event *event, u8 *payload)
+{
+	struct msm_drm_private *priv = NULL;
+	unsigned long flags;
+	struct msm_drm_event *notify, *node;
+	int len = 0, ret;
+
+	if (!obj || !event || !event->length || !payload) {
+		DRM_ERROR("err param obj %pK event %pK len %d payload %pK\n",
+			obj, event, ((event) ? (event->length) : -1),
+			payload);
+		return;
+	}
+	priv = (dev) ? dev->dev_private : NULL;
+	if (!dev || !priv) {
+		DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
+		return;
+	}
+
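+	/* fan the event out to every client registered for this object */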
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_for_each_entry(node, &priv->client_event_list, base.link) {
+		if (node->event.type != event->type ||
+			obj->id != node->info.object_id)
+			continue;
+		len = event->length + sizeof(struct msm_drm_event);
+		if (node->base.file_priv->event_space < len) {
+			DRM_ERROR("Insufficient space %d for event %x len %d\n",
+				node->base.file_priv->event_space, event->type,
+				len);
+			continue;
+		}
+		notify = kzalloc(len, GFP_ATOMIC);
+		if (!notify)
+			continue;
+		notify->base.file_priv = node->base.file_priv;
+		notify->base.event = &notify->event;
+		notify->event.type = node->event.type;
+		notify->event.length = event->length +
+					sizeof(struct drm_msm_event_resp);
+		memcpy(&notify->info, &node->info, sizeof(notify->info));
+		memcpy(notify->data, payload, event->length);
+		ret = drm_event_reserve_init_locked(dev, node->base.file_priv,
+			&notify->base, &notify->event);
+		if (ret) {
+			kfree(notify);
+			continue;
+		}
+		drm_send_event_locked(dev, &notify->base);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int msm_release(struct inode *inode, struct file *filp)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_minor *minor = file_priv->minor;
+	struct drm_device *dev = minor->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_drm_event *node, *temp, *tmp_node;
+	u32 count;
+	unsigned long flags;
+	LIST_HEAD(tmp_head);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	list_for_each_entry_safe(node, temp, &priv->client_event_list,
+			base.link) {
+		if (node->base.file_priv != file_priv)
+			continue;
+		list_del(&node->base.link);
+		list_add_tail(&node->base.link, &tmp_head);
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	list_for_each_entry_safe(node, temp, &tmp_head,
+			base.link) {
+		list_del(&node->base.link);
+		count = msm_event_client_count(dev, &node->info, false);
+
+		list_for_each_entry(tmp_node, &tmp_head, base.link) {
+			if (tmp_node->event.type == node->info.event &&
+				tmp_node->info.object_id ==
+					node->info.object_id)
+				count++;
+		}
+		if (!count)
+			msm_register_event(dev, &node->info, file_priv, false);
+		kfree(node);
+	}
+
+	return drm_release(inode, filp);
+}
+
+/**
+ * msm_ioctl_rmfb2 - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int msm_ioctl_rmfb2(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_framebuffer *fb = NULL;
+	struct drm_framebuffer *fbl = NULL;
+	uint32_t *id = data;
+	int found = 0;
+
+	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+		return -EINVAL;
+
+	fb = drm_framebuffer_lookup(dev, file_priv, *id);
+	if (!fb)
+		return -ENOENT;
+
+	/* drop extra ref from traversing drm_framebuffer_lookup */
+	drm_framebuffer_put(fb);
+
+	mutex_lock(&file_priv->fbs_lock);
+	list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+		if (fb == fbl)
+			found = 1;
+	if (!found) {
+		mutex_unlock(&file_priv->fbs_lock);
+		return -ENOENT;
+	}
+
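+	/*
+	 * Unlike the legacy RMFB ioctl, only this file's reference is
+	 * dropped; the framebuffer is not force-removed from active planes.
+	 */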
+	list_del_init(&fb->filp_head);
+	mutex_unlock(&file_priv->fbs_lock);
+
+	drm_framebuffer_put(fb);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_ioctl_rmfb2);
 
 static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
 		struct drm_file *file)
@@ -884,6 +1741,12 @@
 	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT,  msm_ioctl_register_event,
+			  DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT,  msm_ioctl_deregister_event,
+			  DRM_UNLOCKED),
+	DRM_IOCTL_DEF_DRV(MSM_RMFB2, msm_ioctl_rmfb2, DRM_UNLOCKED),
 };
 
 static const struct vm_operations_struct vm_ops = {
@@ -895,7 +1758,7 @@
 static const struct file_operations fops = {
 	.owner              = THIS_MODULE,
 	.open               = drm_open,
-	.release            = drm_release,
+	.release            = msm_release,
 	.unlocked_ioctl     = drm_ioctl,
 	.compat_ioctl       = drm_compat_ioctl,
 	.poll               = drm_poll,
@@ -912,6 +1775,7 @@
 				DRIVER_ATOMIC |
 				DRIVER_MODESET,
 	.open               = msm_open,
+	.preclose           = msm_preclose,
 	.postclose           = msm_postclose,
 	.lastclose          = drm_fb_helper_lastclose,
 	.irq_handler        = msm_irq,
@@ -927,7 +1791,7 @@
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export   = drm_gem_prime_export,
-	.gem_prime_import   = drm_gem_prime_import,
+	.gem_prime_import   = msm_gem_prime_import,
 	.gem_prime_res_obj  = msm_gem_prime_res_obj,
 	.gem_prime_pin      = msm_gem_prime_pin,
 	.gem_prime_unpin    = msm_gem_prime_unpin,
@@ -942,7 +1806,7 @@
 	.ioctls             = msm_ioctls,
 	.num_ioctls         = ARRAY_SIZE(msm_ioctls),
 	.fops               = &fops,
-	.name               = "msm",
+	.name               = "msm_drm",
 	.desc               = "MSM Snapdragon DRM",
 	.date               = "20130625",
 	.major              = MSM_VERSION_MAJOR,
@@ -953,26 +1817,49 @@
 #ifdef CONFIG_PM_SLEEP
 static int msm_pm_suspend(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct msm_drm_private *priv = ddev->dev_private;
+	struct drm_device *ddev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
 
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+	kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->pm_suspend)
+		return kms->funcs->pm_suspend(dev);
+
+	/* disable hot-plug polling */
 	drm_kms_helper_poll_disable(ddev);
 
-	priv->pm_state = drm_atomic_helper_suspend(ddev);
-	if (IS_ERR(priv->pm_state)) {
-		drm_kms_helper_poll_enable(ddev);
-		return PTR_ERR(priv->pm_state);
-	}
-
 	return 0;
 }
 
 static int msm_pm_resume(struct device *dev)
 {
-	struct drm_device *ddev = dev_get_drvdata(dev);
-	struct msm_drm_private *priv = ddev->dev_private;
+	struct drm_device *ddev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
 
-	drm_atomic_helper_resume(ddev, priv->pm_state);
+	if (!dev)
+		return -EINVAL;
+
+	ddev = dev_get_drvdata(dev);
+	if (!ddev || !ddev->dev_private)
+		return -EINVAL;
+
+	priv = ddev->dev_private;
+	kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->pm_resume)
+		return kms->funcs->pm_resume(dev);
+
+	/* enable hot-plug polling */
 	drm_kms_helper_poll_enable(ddev);
 
 	return 0;
@@ -1090,15 +1977,31 @@
 
 static int compare_name_mdp(struct device *dev, void *data)
 {
-	return (strstr(dev_name(dev), "mdp") != NULL);
+	return (strnstr(dev_name(dev), "mdp", strlen(dev_name(dev))) != NULL);
 }
 
 static int add_display_components(struct device *dev,
 				  struct component_match **matchptr)
 {
-	struct device *mdp_dev;
+	struct device *mdp_dev = NULL;
+	struct device_node *node;
 	int ret;
 
+	if (of_device_is_compatible(dev->of_node, "qcom,sde-kms")) {
+		struct device_node *np = dev->of_node;
+		unsigned int i;
+
+		for (i = 0; ; i++) {
+			node = of_parse_phandle(np, "connectors", i);
+			if (!node)
+				break;
+
+			component_match_add(dev, matchptr, compare_of, node);
+		}
+
+		return 0;
+	}
+
 	/*
 	 * MDP5 based devices don't have a flat hierarchy. There is a top level
 	 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
@@ -1122,8 +2025,8 @@
 		put_device(mdp_dev);
 
 		/* add the MDP component itself */
-		drm_of_component_match_add(dev, matchptr, compare_of,
-					   mdp_dev->of_node);
+		component_match_add(dev, matchptr, compare_of,
+				   mdp_dev->of_node);
 	} else {
 		/* MDP4 */
 		mdp_dev = dev;
@@ -1136,6 +2039,30 @@
 	return ret;
 }
 
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+		unsigned int domain)
+{
+	struct msm_drm_private *priv = NULL;
+	struct msm_kms *kms;
+	const struct msm_kms_funcs *funcs;
+
+	if ((!dev) || (!dev->dev_private))
+		return NULL;
+
+	priv = dev->dev_private;
+	kms = priv->kms;
+	if (!kms)
+		return NULL;
+
+	funcs = kms->funcs;
+
+	if ((!funcs) || (!funcs->get_address_space))
+		return NULL;
+
+	return funcs->get_address_space(priv->kms, domain);
+}
+
 /*
  * We don't know what's the best binding to link the gpu with the drm device.
  * Fow now, we just hunt for all the possible gpus that we support, and add them
@@ -1148,6 +2075,13 @@
 	{ },
 };
 
+#ifdef CONFIG_QCOM_KGSL
+static int add_gpu_components(struct device *dev,
+			      struct component_match **matchptr)
+{
+	return 0;
+}
+#else
 static int add_gpu_components(struct device *dev,
 			      struct component_match **matchptr)
 {
@@ -1163,6 +2097,7 @@
 
 	return 0;
 }
+#endif
 
 static int msm_drm_bind(struct device *dev)
 {
@@ -1185,8 +2120,8 @@
 
 static int msm_pdev_probe(struct platform_device *pdev)
 {
-	struct component_match *match = NULL;
 	int ret;
+	struct component_match *match = NULL;
 
 	ret = add_display_components(&pdev->dev, &match);
 	if (ret)
@@ -1196,13 +2131,7 @@
 	if (ret)
 		return ret;
 
-	/* on all devices that I am aware of, iommu's which can map
-	 * any address the cpu can see are used:
-	 */
-	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
-	if (ret)
-		return ret;
-
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 	return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
 }
 
@@ -1211,33 +2140,70 @@
-	component_master_del(&pdev->dev, &msm_drm_ops);
-	of_platform_depopulate(&pdev->dev);
-
+	msm_drm_unbind(&pdev->dev);
+	component_master_del(&pdev->dev, &msm_drm_ops);
 	return 0;
 }
 
+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct msm_drm_private *priv = NULL;
+
+	if (!ddev) {
+		DRM_ERROR("invalid drm device node\n");
+		return;
+	}
+
+	priv = ddev->dev_private;
+	if (!priv) {
+		DRM_ERROR("invalid msm drm private node\n");
+		return;
+	}
+
+	msm_lastclose(ddev);
+
+	/* set this after lastclose to allow kickoff from lastclose */
+	priv->shutdown_in_progress = true;
+}
+
 static const struct of_device_id dt_match[] = {
 	{ .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
 	{ .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
-	{}
+	{ .compatible = "qcom,sde-kms", .data = (void *)KMS_SDE },
+	{},
 };
 MODULE_DEVICE_TABLE(of, dt_match);
 
 static struct platform_driver msm_platform_driver = {
 	.probe      = msm_pdev_probe,
 	.remove     = msm_pdev_remove,
+	.shutdown   = msm_pdev_shutdown,
 	.driver     = {
-		.name   = "msm",
+		.name   = "msm_drm",
 		.of_match_table = dt_match,
 		.pm     = &msm_pm_ops,
+		.suppress_bind_attrs = true,
 	},
 };
 
+#ifdef CONFIG_QCOM_KGSL
+void __init adreno_register(void)
+{
+}
+
+void __exit adreno_unregister(void)
+{
+}
+#endif
+
 static int __init msm_drm_register(void)
 {
 	if (!modeset)
 		return -EINVAL;
 
 	DBG("init");
-	msm_mdp_register();
+	msm_smmu_driver_init();
 	msm_dsi_register();
 	msm_edp_register();
 	msm_hdmi_register();
@@ -1253,7 +2219,7 @@
 	adreno_unregister();
 	msm_edp_unregister();
 	msm_dsi_unregister();
-	msm_mdp_unregister();
+	msm_smmu_driver_cleanup();
 }
 
 module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 42d072c..ad27e85 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
@@ -34,7 +33,9 @@
 #include <linux/types.h>
 #include <linux/of_graph.h>
 #include <linux/of_device.h>
+#include <linux/sde_io_util.h>
 #include <asm/sizes.h>
+#include <linux/kthread.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_atomic.h>
@@ -45,6 +46,12 @@
 #include <drm/msm_drm.h>
 #include <drm/drm_gem.h>
 
+#include "sde_power_handle.h"
+
+#define GET_MAJOR_REV(rev)		((rev) >> 28)
+#define GET_MINOR_REV(rev)		(((rev) >> 16) & 0xFFF)
+#define GET_STEP_REV(rev)		((rev) & 0xFFFF)
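+/* e.g. a revision value of 0x50020000 decodes as major 5, minor 2, step 0 */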
+
 struct msm_kms;
 struct msm_gpu;
 struct msm_mmu;
@@ -53,35 +60,497 @@
 struct msm_perf_state;
 struct msm_gem_submit;
 struct msm_fence_context;
+struct msm_fence_cb;
 struct msm_gem_address_space;
 struct msm_gem_vma;
 
+#define NUM_DOMAINS    4    /* one for KMS, then one per gpu core (?) */
 #define MAX_CRTCS      8
-#define MAX_PLANES     16
+#define MAX_PLANES     20
 #define MAX_ENCODERS   8
 #define MAX_BRIDGES    8
 #define MAX_CONNECTORS 8
 
+#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+
 struct msm_file_private {
 	rwlock_t queuelock;
 	struct list_head submitqueues;
+
 	int queueid;
 };
 
 enum msm_mdp_plane_property {
-	PLANE_PROP_ZPOS,
+	/* blob properties, always put these first */
+	PLANE_PROP_CSC_V1,
+	PLANE_PROP_CSC_DMA_V1,
+	PLANE_PROP_INFO,
+	PLANE_PROP_SCALER_LUT_ED,
+	PLANE_PROP_SCALER_LUT_CIR,
+	PLANE_PROP_SCALER_LUT_SEP,
+	PLANE_PROP_SKIN_COLOR,
+	PLANE_PROP_SKY_COLOR,
+	PLANE_PROP_FOLIAGE_COLOR,
+	PLANE_PROP_VIG_GAMUT,
+	PLANE_PROP_VIG_IGC,
+	PLANE_PROP_DMA_IGC,
+	PLANE_PROP_DMA_GC,
+	PLANE_PROP_ROT_CAPS_V1,
+
+	/* # of blob properties */
+	PLANE_PROP_BLOBCOUNT,
+
+	/* range properties */
+	PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
 	PLANE_PROP_ALPHA,
-	PLANE_PROP_PREMULTIPLIED,
-	PLANE_PROP_MAX_NUM
+	PLANE_PROP_COLOR_FILL,
+	PLANE_PROP_H_DECIMATE,
+	PLANE_PROP_V_DECIMATE,
+	PLANE_PROP_INPUT_FENCE,
+	PLANE_PROP_HUE_ADJUST,
+	PLANE_PROP_SATURATION_ADJUST,
+	PLANE_PROP_VALUE_ADJUST,
+	PLANE_PROP_CONTRAST_ADJUST,
+	PLANE_PROP_EXCL_RECT_V1,
+	PLANE_PROP_ROT_DST_X,
+	PLANE_PROP_ROT_DST_Y,
+	PLANE_PROP_ROT_DST_W,
+	PLANE_PROP_ROT_DST_H,
+	PLANE_PROP_PREFILL_SIZE,
+	PLANE_PROP_PREFILL_TIME,
+	PLANE_PROP_SCALER_V1,
+	PLANE_PROP_SCALER_V2,
+	PLANE_PROP_ROT_OUT_FB,
+	PLANE_PROP_INVERSE_PMA,
+
+	/* enum/bitmask properties */
+	PLANE_PROP_BLEND_OP,
+	PLANE_PROP_SRC_CONFIG,
+	PLANE_PROP_FB_TRANSLATION_MODE,
+	PLANE_PROP_MULTIRECT_MODE,
+
+	/* total # of properties */
+	PLANE_PROP_COUNT
+};
+
+enum msm_mdp_crtc_property {
+	CRTC_PROP_INFO,
+	CRTC_PROP_DEST_SCALER_LUT_ED,
+	CRTC_PROP_DEST_SCALER_LUT_CIR,
+	CRTC_PROP_DEST_SCALER_LUT_SEP,
+
+	/* # of blob properties */
+	CRTC_PROP_BLOBCOUNT,
+
+	/* range properties */
+	CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
+	CRTC_PROP_OUTPUT_FENCE,
+	CRTC_PROP_OUTPUT_FENCE_OFFSET,
+	CRTC_PROP_DIM_LAYER_V1,
+	CRTC_PROP_CORE_CLK,
+	CRTC_PROP_CORE_AB,
+	CRTC_PROP_CORE_IB,
+	CRTC_PROP_LLCC_AB,
+	CRTC_PROP_LLCC_IB,
+	CRTC_PROP_DRAM_AB,
+	CRTC_PROP_DRAM_IB,
+	CRTC_PROP_ROT_PREFILL_BW,
+	CRTC_PROP_ROT_CLK,
+	CRTC_PROP_ROI_V1,
+	CRTC_PROP_SECURITY_LEVEL,
+	CRTC_PROP_IDLE_TIMEOUT,
+	CRTC_PROP_DEST_SCALER,
+	CRTC_PROP_CAPTURE_OUTPUT,
+
+	CRTC_PROP_IDLE_PC_STATE,
+
+	/* total # of properties */
+	CRTC_PROP_COUNT
+};
+
+enum msm_mdp_conn_property {
+	/* blob properties, always put these first */
+	CONNECTOR_PROP_SDE_INFO,
+	CONNECTOR_PROP_MODE_INFO,
+	CONNECTOR_PROP_HDR_INFO,
+	CONNECTOR_PROP_EXT_HDR_INFO,
+	CONNECTOR_PROP_PP_DITHER,
+	CONNECTOR_PROP_HDR_METADATA,
+
+	/* # of blob properties */
+	CONNECTOR_PROP_BLOBCOUNT,
+
+	/* range properties */
+	CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
+	CONNECTOR_PROP_RETIRE_FENCE,
+	CONNECTOR_PROP_DST_X,
+	CONNECTOR_PROP_DST_Y,
+	CONNECTOR_PROP_DST_W,
+	CONNECTOR_PROP_DST_H,
+	CONNECTOR_PROP_ROI_V1,
+	CONNECTOR_PROP_BL_SCALE,
+	CONNECTOR_PROP_AD_BL_SCALE,
+
+	/* enum/bitmask properties */
+	CONNECTOR_PROP_TOPOLOGY_NAME,
+	CONNECTOR_PROP_TOPOLOGY_CONTROL,
+	CONNECTOR_PROP_AUTOREFRESH,
+	CONNECTOR_PROP_LP,
+	CONNECTOR_PROP_FB_TRANSLATION_MODE,
+	CONNECTOR_PROP_QSYNC_MODE,
+
+	/* total # of properties */
+	CONNECTOR_PROP_COUNT
 };
 
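+/*
+ * Pending vblank enable/disable requests, queued by vblank_ctrl_queue_work()
+ * and drained by vblank_ctrl_worker() on the CRTC's display kthread.
+ */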
 struct msm_vblank_ctrl {
-	struct work_struct work;
+	struct kthread_work work;
 	struct list_head event_list;
 	spinlock_t lock;
 };
 
 #define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_compression_type - compression method used for pixel stream
+ * @MSM_DISPLAY_COMPRESSION_NONE:     Pixel data is not compressed
+ * @MSM_DISPLAY_COMPRESSION_DSC:      DSC compression is used
+ */
+enum msm_display_compression_type {
+	MSM_DISPLAY_COMPRESSION_NONE,
+	MSM_DISPLAY_COMPRESSION_DSC,
+};
+
+/**
+ * enum msm_display_compression_ratio - compression ratio
+ * @MSM_DISPLAY_COMPRESSION_RATIO_NONE: no compression
+ * @MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1: 2 to 1 compression
+ * @MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1: 3 to 1 compression
+ */
+enum msm_display_compression_ratio {
+	MSM_DISPLAY_COMPRESSION_RATIO_NONE,
+	MSM_DISPLAY_COMPRESSION_RATIO_2_TO_1,
+	MSM_DISPLAY_COMPRESSION_RATIO_3_TO_1,
+	MSM_DISPLAY_COMPRESSION_RATIO_MAX,
+};
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE:           Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE:           Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG:           Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID:               EDID supported
+ * @MSM_DISPLAY_ESD_ENABLED:            ESD feature enabled
+ * @MSM_DISPLAY_CAP_MST_MODE:           Display with MST support
+ */
+enum msm_display_caps {
+	MSM_DISPLAY_CAP_VID_MODE	= BIT(0),
+	MSM_DISPLAY_CAP_CMD_MODE	= BIT(1),
+	MSM_DISPLAY_CAP_HOT_PLUG	= BIT(2),
+	MSM_DISPLAY_CAP_EDID		= BIT(3),
+	MSM_DISPLAY_ESD_ENABLED		= BIT(4),
+	MSM_DISPLAY_CAP_MST_MODE	= BIT(5),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE: wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE: wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK: wait for the HW VBLANK event (for driver-internal waiters)
+ * @MSM_ENC_ACTIVE_REGION: wait for the TG to be in active pixel region
+ */
+enum msm_event_wait {
+	MSM_ENC_COMMIT_DONE = 0,
+	MSM_ENC_TX_COMPLETE,
+	MSM_ENC_VBLANK,
+	MSM_ENC_ACTIVE_REGION,
+};
+
+/**
+ * struct msm_roi_alignment - region of interest alignment restrictions
+ * @xstart_pix_align: left x offset alignment restriction
+ * @width_pix_align: width alignment restriction
+ * @ystart_pix_align: top y offset alignment restriction
+ * @height_pix_align: height alignment restriction
+ * @min_width: minimum width restriction
+ * @min_height: minimum height restriction
+ */
+struct msm_roi_alignment {
+	uint32_t xstart_pix_align;
+	uint32_t width_pix_align;
+	uint32_t ystart_pix_align;
+	uint32_t height_pix_align;
+	uint32_t min_width;
+	uint32_t min_height;
+};
+
+/**
+ * struct msm_roi_caps - display's region of interest capabilities
+ * @enabled: true if some region of interest is supported
+ * @merge_rois: merge rois before sending to display
+ * @num_roi: maximum number of rois supported
+ * @align: roi alignment restrictions
+ */
+struct msm_roi_caps {
+	bool enabled;
+	bool merge_rois;
+	uint32_t num_roi;
+	struct msm_roi_alignment align;
+};
+
+/**
+ * struct msm_display_dsc_info - defines dsc configuration
+ * @version:                 DSC version.
+ * @scr_rev:                 DSC revision.
+ * @pic_height:              Picture height in pixels.
+ * @pic_width:               Picture width in pixels.
+ * @initial_lines:           Number of initial lines stored in encoder.
+ * @pkt_per_line:            Number of packets per line.
+ * @bytes_in_slice:          Number of bytes in slice.
+ * @bytes_per_pkt:           Number of bytes per packet.
+ * @eol_byte_num:            Valid bytes at the end of line.
+ * @pclk_per_line:           Compressed width.
+ * @full_frame_slices:       Number of slices per interface.
+ * @slice_height:            Slice height in pixels.
+ * @slice_width:             Slice width in pixels.
+ * @chunk_size:              Chunk size in bytes for slice multiplexing.
+ * @slice_last_group_size:   Size of last group in pixels.
+ * @bpp:                     Target bits per pixel.
+ * @bpc:                     Number of bits per component.
+ * @line_buf_depth:          Line buffer bit depth.
+ * @block_pred_enable:       Block prediction enabled/disabled.
+ * @vbr_enable:              VBR mode.
+ * @enable_422:              Indicates if input uses 4:2:2 sampling.
+ * @convert_rgb:             DSC color space conversion.
+ * @input_10_bits:           10 bit per component input.
+ * @slice_per_pkt:           Number of slices per packet.
+ * @initial_dec_delay:       Initial decoding delay.
+ * @initial_xmit_delay:      Initial transmission delay.
+ * @initial_scale_value:     Scale factor value at the beginning of a slice.
+ * @scale_decrement_interval: Scale factor decrement interval at slice start.
+ * @scale_increment_interval: Scale factor increment interval at slice end.
+ * @first_line_bpg_offset:   Extra bits allocated on the first line of a slice.
+ * @nfl_bpg_offset:          Slice specific settings.
+ * @slice_bpg_offset:        Slice specific settings.
+ * @initial_offset:          Initial offset at the start of a slice.
+ * @final_offset:            Maximum end-of-slice value.
+ * @rc_model_size:           Number of bits in RC model.
+ * @det_thresh_flatness:     Flatness threshold.
+ * @max_qp_flatness:         Maximum QP for flatness adjustment.
+ * @min_qp_flatness:         Minimum QP for flatness adjustment.
+ * @edge_factor:             Ratio to detect presence of edge.
+ * @quant_incr_limit0:       QP threshold.
+ * @quant_incr_limit1:       QP threshold.
+ * @tgt_offset_hi:           Upper end of variability range.
+ * @tgt_offset_lo:           Lower end of variability range.
+ * @buf_thresh:              Thresholds in RC model.
+ * @range_min_qp:            Min QP allowed.
+ * @range_max_qp:            Max QP allowed.
+ * @range_bpg_offset:        Bits per group adjustment.
+ */
+struct msm_display_dsc_info {
+	u8 version;
+	u8 scr_rev;
+
+	int pic_height;
+	int pic_width;
+	int slice_height;
+	int slice_width;
+
+	int initial_lines;
+	int pkt_per_line;
+	int bytes_in_slice;
+	int bytes_per_pkt;
+	int eol_byte_num;
+	int pclk_per_line;
+	int full_frame_slices;
+	int slice_last_group_size;
+	int bpp;
+	int bpc;
+	int line_buf_depth;
+
+	int slice_per_pkt;
+	int chunk_size;
+	bool block_pred_enable;
+	int vbr_enable;
+	int enable_422;
+	int convert_rgb;
+	int input_10_bits;
+
+	int initial_dec_delay;
+	int initial_xmit_delay;
+	int initial_scale_value;
+	int scale_decrement_interval;
+	int scale_increment_interval;
+	int first_line_bpg_offset;
+	int nfl_bpg_offset;
+	int slice_bpg_offset;
+	int initial_offset;
+	int final_offset;
+
+	int rc_model_size;
+	int det_thresh_flatness;
+	int max_qp_flatness;
+	int min_qp_flatness;
+	int edge_factor;
+	int quant_incr_limit0;
+	int quant_incr_limit1;
+	int tgt_offset_hi;
+	int tgt_offset_lo;
+
+	u32 *buf_thresh;
+	char *range_min_qp;
+	char *range_max_qp;
+	char *range_bpg_offset;
+};
+
+/**
+ * struct msm_compression_info - defines panel compression
+ * @comp_type:        type of compression supported
+ * @comp_ratio:       compression ratio
+ * @dsc_info:         DSC configuration, used when the supported
+ *                    compression type is DSC
+ */
+struct msm_compression_info {
+	enum msm_display_compression_type comp_type;
+	enum msm_display_compression_ratio comp_ratio;
+
+	union {
+		struct msm_display_dsc_info dsc_info;
+	};
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm:       number of layer mixers used
+ * @num_enc:      number of compression encoder blocks used
+ * @num_intf:     number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+	u32 num_lm;
+	u32 num_enc;
+	u32 num_intf;
+};
+
+/**
+ * struct msm_mode_info - defines all msm custom mode info
+ * @frame_rate:      frame_rate of the mode
+ * @vtotal:          vtotal calculated for the mode
+ * @prefill_lines:   prefill lines based on porches.
+ * @jitter_numer:	display panel jitter numerator configuration
+ * @jitter_denom:	display panel jitter denominator configuration
+ * @clk_rate:	     DSI bit clock per lane in HZ.
+ * @topology:        supported topology for the mode
+ * @comp_info:       compression info supported
+ * @roi_caps:        panel roi capabilities
+ * @wide_bus_en:	wide-bus mode cfg for interface module
+ */
+struct msm_mode_info {
+	uint32_t frame_rate;
+	uint32_t vtotal;
+	uint32_t prefill_lines;
+	uint32_t jitter_numer;
+	uint32_t jitter_denom;
+	uint64_t clk_rate;
+	struct msm_display_topology topology;
+	struct msm_compression_info comp_info;
+	struct msm_roi_caps roi_caps;
+	bool wide_bus_en;
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type:          DRM_MODE_CONNECTOR_ display type
+ * @capabilities:       Bitmask of display flags
+ * @num_of_h_tiles:     Number of horizontal tiles in case of split interface
+ * @h_tile_instance:    Controller instance used per tile. Number of elements is
+ *                      based on num_of_h_tiles
+ * @is_connected:       Set to true if display is connected
+ * @width_mm:           Physical width
+ * @height_mm:          Physical height
+ * @max_width:          Max width of display. In case of hot pluggable display
+ *                      this is max width supported by controller
+ * @max_height:         Max height of display. In case of hot pluggable display
+ *                      this is max height supported by controller
+ * @clk_rate:           DSI bit clock per lane in HZ.
+ * @is_primary:         Set to true if display is primary display
+ * @is_te_using_watchdog_timer:  Boolean to indicate watchdog TE is
+ *				 used instead of panel TE in cmd mode panels
+ * @roi_caps:           Region of interest capability info
+ * @qsync_min_fps:	Minimum fps supported by Qsync feature
+ * @te_source:		vsync source pin information
+ */
+struct msm_display_info {
+	int intf_type;
+	uint32_t capabilities;
+
+	uint32_t num_of_h_tiles;
+	uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+
+	bool is_connected;
+
+	unsigned int width_mm;
+	unsigned int height_mm;
+
+	uint32_t max_width;
+	uint32_t max_height;
+	uint64_t clk_rate;
+
+	bool is_primary;
+	bool is_te_using_watchdog_timer;
+	struct msm_roi_caps roi_caps;
+
+	uint32_t qsync_min_fps;
+	uint32_t te_source;
+};
+
+#define MSM_MAX_ROI	4
+
+/**
+ * struct msm_roi_list - list of regions of interest for a drm object
+ * @num_rects: number of valid rectangles in the roi array
+ * @roi: list of roi rectangles
+ */
+struct msm_roi_list {
+	uint32_t num_rects;
+	struct drm_clip_rect roi[MSM_MAX_ROI];
+};
+
+/**
+ * struct msm_display_kickoff_params - info for display features at kickoff
+ * @rois: Regions of interest structure for mapping CRTC to Connector output
+ * @hdr_meta: HDR metadata associated with this kickoff
+ * @qsync_mode: Qsync mode, where 0: disabled 1: continuous mode
+ * @qsync_update: Qsync settings were changed/updated
+ */
+struct msm_display_kickoff_params {
+	struct msm_roi_list *rois;
+	struct drm_msm_ext_hdr_metadata *hdr_meta;
+	uint32_t qsync_mode;
+	bool qsync_update;
+};
+
+/**
+ * struct msm_drm_event - defines custom event notification struct
+ * @base: base object required for event notification by DRM framework.
+ * @event: event object required for event notification by DRM framework.
+ * @info: contains information of DRM object for which events have been
+ *        requested.
+ * @data: memory location which contains response payload for event.
+ */
+struct msm_drm_event {
+	struct drm_pending_event base;
+	struct drm_event event;
+	struct drm_msm_event_req info;
+	u8 data[];
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+	struct drm_device *dev;
+	struct task_struct *thread;
+	unsigned int crtc_id;
+	struct kthread_worker worker;
+};
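+
+/*
+ * Hypothetical dispatch sketch (names illustrative, work init elided):
+ * a per-crtc kthread_work is queued to that crtc's commit thread.
+ *
+ *	kthread_queue_work(&priv->disp_thread[crtc_id].worker, &work);
+ */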
 
 struct msm_drm_private {
 
@@ -89,6 +558,9 @@
 
 	struct msm_kms *kms;
 
+	struct sde_power_handle phandle;
+	struct sde_power_client *pclient;
+
 	/* subordinate devices, if present: */
 	struct platform_device *gpu_pdev;
 
@@ -123,7 +595,10 @@
 	struct list_head inactive_list;
 
 	struct workqueue_struct *wq;
-	struct workqueue_struct *atomic_wq;
+
+	/* crtcs pending async atomic updates: */
+	uint32_t pending_crtcs;
+	wait_queue_head_t pending_crtcs_event;
 
 	unsigned int num_planes;
 	struct drm_plane *planes[MAX_PLANES];
@@ -131,6 +606,12 @@
 	unsigned int num_crtcs;
 	struct drm_crtc *crtcs[MAX_CRTCS];
 
+	struct msm_drm_thread disp_thread[MAX_CRTCS];
+	struct msm_drm_thread event_thread[MAX_CRTCS];
+
+	struct task_struct *pp_event_thread;
+	struct kthread_worker pp_event_worker;
+
 	unsigned int num_encoders;
 	struct drm_encoder *encoders[MAX_ENCODERS];
 
@@ -141,7 +622,12 @@
 	struct drm_connector *connectors[MAX_CONNECTORS];
 
 	/* Properties */
-	struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+	struct drm_property *plane_property[PLANE_PROP_COUNT];
+	struct drm_property *crtc_property[CRTC_PROP_COUNT];
+	struct drm_property *conn_property[CONNECTOR_PROP_COUNT];
+
+	/* Color processing properties for the crtc */
+	struct drm_property **cp_property;
 
 	/* VRAM carveout, used when no IOMMU: */
 	struct {
@@ -159,8 +645,30 @@
 
 	struct msm_vblank_ctrl vblank_ctrl;
 	struct drm_atomic_state *pm_state;
+
+	/* task holding struct_mutex.. currently only used in submit path
+	 * to detect and reject faults from copy_from_user() for submit
+	 * ioctl.
+	 */
+	struct task_struct *struct_mutex_task;
+
+	/* list of clients waiting for events */
+	struct list_head client_event_list;
+
+	/* whether registered and drm_dev_unregister should be called */
+	bool registered;
+
+	/* msm drv debug root node */
+	struct dentry *debug_root;
+
+	/* update the flag when msm driver receives shutdown notification */
+	bool shutdown_in_progress;
 };
 
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+		((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
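+
+/*
+ * Illustrative caller (hypothetical); the macro tolerates a NULL device
+ * or missing dev_private:
+ *
+ *	struct msm_kms *kms = ddev_to_msm_kms(ddev);
+ *
+ *	if (!kms)
+ *		return -EINVAL;
+ */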
+
 struct msm_format {
 	uint32_t pixel_format;
 };
@@ -168,14 +676,35 @@
 int msm_atomic_prepare_fb(struct drm_plane *plane,
 			  struct drm_plane_state *new_state);
 void msm_atomic_commit_tail(struct drm_atomic_state *state);
+int msm_atomic_commit(struct drm_device *dev,
+	struct drm_atomic_state *state, bool nonblock);
+
+/* callback from wq once fence has passed: */
+struct msm_fence_cb {
+	struct work_struct work;
+	uint32_t fence;
+	void (*func)(struct msm_fence_cb *cb);
+};
+
+void __msm_fence_worker(struct work_struct *work);
+
+#define INIT_FENCE_CB(_cb, _func)  do {                     \
+		INIT_WORK(&(_cb)->work, __msm_fence_worker); \
+		(_cb)->func = _func;                         \
+	} while (0)
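+
+/*
+ * Hypothetical usage sketch (names are illustrative, not part of the API):
+ *
+ *	static void my_fence_done(struct msm_fence_cb *cb)
+ *	{
+ *		... fence has passed, buffer is safe to reuse ...
+ *	}
+ *
+ *	static struct msm_fence_cb my_cb;
+ *	INIT_FENCE_CB(&my_cb, my_fence_done);
+ */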
+
 struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
 void msm_atomic_state_clear(struct drm_atomic_state *state);
 void msm_atomic_state_free(struct drm_atomic_state *state);
 
 void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt);
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		unsigned int flags);
 int msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+		struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
+		unsigned int flags);
+
+struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace);
 
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 
@@ -183,6 +712,61 @@
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name);
 
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
+		const char *name);
+
+/**
+ * msm_gem_add_obj_to_aspace_active_list: adds obj to active obj list in aspace
+ */
+void msm_gem_add_obj_to_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj);
+
+/**
+ * msm_gem_remove_obj_from_aspace_active_list: removes obj from active obj
+ * list in aspace
+ */
+void msm_gem_remove_obj_from_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj);
+
+/**
+ * msm_gem_smmu_address_space_get: returns the aspace pointer for the requested
+ * domain
+ */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_get(struct drm_device *dev,
+		unsigned int domain);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
+/**
+ * msm_gem_aspace_domain_attach_detach_update: function to inform clients
+ * about attach/detach of the domain for this aspace
+ */
+void msm_gem_aspace_domain_attach_detach_update(
+		struct msm_gem_address_space *aspace,
+		bool is_detach);
+
+/**
+ * msm_gem_address_space_register_cb: function to register callback for attach
+ * and detach of the domain
+ */
+int msm_gem_address_space_register_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data);
+
+/**
+ * msm_gem_address_space_unregister_cb: function to unregister callback
+ */
+int msm_gem_address_space_unregister_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data);
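+
+/*
+ * Hypothetical pairing example (callback/data names are illustrative):
+ *
+ *	msm_gem_address_space_register_cb(aspace, my_domain_cb, my_data);
+ *	...
+ *	msm_gem_address_space_unregister_cb(aspace, my_domain_cb, my_data);
+ */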
+
 void msm_gem_submit_free(struct msm_gem_submit *submit);
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		struct drm_file *file);
@@ -190,6 +774,7 @@
 void msm_gem_shrinker_init(struct drm_device *dev);
 void msm_gem_shrinker_cleanup(struct drm_device *dev);
 
+void msm_gem_sync(struct drm_gem_object *obj);
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 			struct vm_area_struct *vma);
 int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -203,6 +788,7 @@
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace);
+dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj);
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args);
 int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -216,6 +802,8 @@
 		struct dma_buf_attachment *attach, struct sg_table *sg);
 int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf);
 void *msm_gem_get_vaddr(struct drm_gem_object *obj);
 void *msm_gem_get_vaddr_active(struct drm_gem_object *obj);
 void msm_gem_put_vaddr(struct drm_gem_object *obj);
@@ -242,15 +830,22 @@
 		struct drm_gem_object **bo, uint64_t *iova);
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt);
+int msm_gem_delayed_import(struct drm_gem_object *obj);
 
+void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable);
+void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable);
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace);
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace);
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace, int plane);
+uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb, int plane);
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+		const struct drm_mode_fb_cmd2 *mode_cmd,
+		struct drm_gem_object **bos);
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
 		struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
 struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev,
@@ -290,6 +885,16 @@
 #endif
 
 struct msm_dsi;
+
+/**
+ * msm_mode_object_event_notify - notify user-space clients of drm object
+ *                                events.
+ * @obj: mode object (crtc/connector) that is generating the event.
+ * @dev: drm device the mode object belongs to.
+ * @event: event that needs to be notified.
+ * @payload: payload for the event.
+ */
+void msm_mode_object_event_notify(struct drm_mode_object *obj,
+		struct drm_device *dev, struct drm_event *event, u8 *payload);
 #ifdef CONFIG_DRM_MSM_DSI
 void __init msm_dsi_register(void);
 void __exit msm_dsi_unregister(void);
@@ -348,6 +953,8 @@
 	const char *name);
 void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 		const char *dbgname);
+unsigned long msm_iomap_size(struct platform_device *pdev, const char *name);
+void msm_iounmap(struct platform_device *dev, void __iomem *addr);
 void msm_writel(u32 data, void __iomem *addr);
 u32 msm_readl(const void __iomem *addr);
 
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 2a7348a..45cd190 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -15,6 +16,8 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
@@ -23,15 +26,17 @@
 #include "msm_kms.h"
 #include "msm_gem.h"
 
+#define MSM_FRAMEBUFFER_FLAG_KMAP	BIT(0)
+
 struct msm_framebuffer {
 	struct drm_framebuffer base;
 	const struct msm_format *format;
+	void *vaddr[MAX_PLANE];
+	atomic_t kmap_count;
+	u32 flags;
 };
 #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
 
-static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
-		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
-
 static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
 	.create_handle = drm_gem_fb_create_handle,
 	.destroy = drm_gem_fb_destroy,
@@ -40,8 +45,16 @@
 #ifdef CONFIG_DEBUG_FS
 void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
 {
-	int i, n = fb->format->num_planes;
+	struct msm_framebuffer *msm_fb;
+	int i, n;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = fb->format->num_planes;
 	seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
 			fb->width, fb->height, (char *)&fb->format->format,
 			drm_framebuffer_read_refcount(fb), fb->base.id);
@@ -54,6 +67,113 @@
 }
 #endif
 
+void msm_framebuffer_set_keepattrs(struct drm_framebuffer *fb, bool enable)
+{
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+	struct drm_gem_object *bo;
+	struct msm_gem_object *msm_obj;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	if (!fb->format) {
+		DRM_ERROR("from:%pS null fb->format\n",
+				__builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = fb->format->num_planes;
+	for (i = 0; i < n; i++) {
+		bo = msm_framebuffer_bo(fb, i);
+		if (bo) {
+			msm_obj = to_msm_bo(bo);
+			if (enable)
+				msm_obj->flags |= MSM_BO_KEEPATTRS;
+			else
+				msm_obj->flags &= ~MSM_BO_KEEPATTRS;
+		}
+	}
+}
+
+void msm_framebuffer_set_kmap(struct drm_framebuffer *fb, bool enable)
+{
+	struct msm_framebuffer *msm_fb;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	if (enable)
+		msm_fb->flags |= MSM_FRAMEBUFFER_FLAG_KMAP;
+	else
+		msm_fb->flags &= ~MSM_FRAMEBUFFER_FLAG_KMAP;
+}
+
+static int msm_framebuffer_kmap(struct drm_framebuffer *fb)
+{
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+	struct drm_gem_object *bo;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = fb->format->num_planes;
+	if (atomic_inc_return(&msm_fb->kmap_count) > 1)
+		return 0;
+
+	for (i = 0; i < n; i++) {
+		bo = msm_framebuffer_bo(fb, i);
+		if (!bo || !bo->dma_buf) {
+			msm_fb->vaddr[i] = NULL;
+			continue;
+		}
+		dma_buf_begin_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL);
+		msm_fb->vaddr[i] = dma_buf_kmap(bo->dma_buf, 0);
+		DRM_INFO("FB[%u]: vaddr[%d]:%ux%u:0x%llx\n", fb->base.id, i,
+				fb->width, fb->height, (u64) msm_fb->vaddr[i]);
+	}
+
+	return 0;
+}
+
+static void msm_framebuffer_kunmap(struct drm_framebuffer *fb)
+{
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+	struct drm_gem_object *bo;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = fb->format->num_planes;
+	if (atomic_dec_return(&msm_fb->kmap_count) > 0)
+		return;
+
+	for (i = 0; i < n; i++) {
+		bo = msm_framebuffer_bo(fb, i);
+		if (!bo || !msm_fb->vaddr[i])
+			continue;
+		if (bo->dma_buf) {
+			dma_buf_kunmap(bo->dma_buf, 0, msm_fb->vaddr[i]);
+			dma_buf_end_cpu_access(bo->dma_buf, DMA_BIDIRECTIONAL);
+		}
+		msm_fb->vaddr[i] = NULL;
+	}
+}
+
 /* prepare/pin all the fb's bo's for scanout.  Note that it is not valid
 * to prepare an fb for multiple different initiator 'id's.  But that
  * should be fine, since only the scanout (mdpN) side of things needs
@@ -62,9 +182,17 @@
 int msm_framebuffer_prepare(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace)
 {
-	int ret, i, n = fb->format->num_planes;
+	struct msm_framebuffer *msm_fb;
+	int ret, i, n;
 	uint64_t iova;
 
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = fb->format->num_planes;
 	for (i = 0; i < n; i++) {
 		ret = msm_gem_get_iova(fb->obj[i], aspace, &iova);
 		DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
@@ -72,13 +200,28 @@
 			return ret;
 	}
 
+	if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
+		msm_framebuffer_kmap(fb);
+
 	return 0;
 }
 
 void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace)
 {
-	int i, n = fb->format->num_planes;
+	struct msm_framebuffer *msm_fb;
+	int i, n;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+	n = fb->format->num_planes;
+
+	if (msm_fb->flags & MSM_FRAMEBUFFER_FLAG_KMAP)
+		msm_framebuffer_kunmap(fb);
 
 	for (i = 0; i < n; i++)
 		msm_gem_put_iova(fb->obj[i], aspace);
@@ -87,20 +230,54 @@
 uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
 		struct msm_gem_address_space *aspace, int plane)
 {
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
 	if (!fb->obj[plane])
 		return 0;
+
 	return msm_gem_iova(fb->obj[plane], aspace) + fb->offsets[plane];
 }
 
+uint32_t msm_framebuffer_phys(struct drm_framebuffer *fb,
+		int plane)
+{
+	struct msm_framebuffer *msm_fb;
+	dma_addr_t phys_addr;
+
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return -EINVAL;
+	}
+
+	msm_fb = to_msm_framebuffer(fb);
+
+	if (!msm_fb->base.obj[plane])
+		return 0;
+
+	phys_addr = msm_gem_get_dma_addr(msm_fb->base.obj[plane]);
+	if (!phys_addr)
+		return 0;
+
+	return phys_addr + fb->offsets[plane];
+}
+
 struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
 {
+	if (!fb) {
+		DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+		return ERR_PTR(-EINVAL);
+	}
+
 	return drm_gem_fb_get_obj(fb, plane);
 }
 
 const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
 {
-	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
-	return msm_fb->format;
+	return fb ? (to_msm_framebuffer(fb))->format : NULL;
 }
 
 struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
@@ -132,7 +309,7 @@
 	return ERR_PTR(ret);
 }
 
-static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 		const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
 {
 	struct msm_drm_private *priv = dev->dev_private;
@@ -140,14 +317,15 @@
 	struct msm_framebuffer *msm_fb = NULL;
 	struct drm_framebuffer *fb;
 	const struct msm_format *format;
-	int ret, i, n;
+	int ret, i, num_planes;
 	unsigned int hsub, vsub;
+	bool is_modified = false;
 
 	DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
 			dev, mode_cmd, mode_cmd->width, mode_cmd->height,
 			(char *)&mode_cmd->pixel_format);
 
-	n = drm_format_num_planes(mode_cmd->pixel_format);
+	num_planes = drm_format_num_planes(mode_cmd->pixel_format);
 	hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
 	vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
 
@@ -169,29 +347,56 @@
 	fb = &msm_fb->base;
 
 	msm_fb->format = format;
+	atomic_set(&msm_fb->kmap_count, 0);
 
-	if (n > ARRAY_SIZE(fb->obj)) {
+	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+		for (i = 0; i < ARRAY_SIZE(mode_cmd->modifier); i++) {
+			if (mode_cmd->modifier[i]) {
+				is_modified = true;
+				break;
+			}
+		}
+	}
+
+	if (num_planes > ARRAY_SIZE(fb->obj)) {
 		ret = -EINVAL;
 		goto fail;
 	}
 
-	for (i = 0; i < n; i++) {
-		unsigned int width = mode_cmd->width / (i ? hsub : 1);
-		unsigned int height = mode_cmd->height / (i ? vsub : 1);
-		unsigned int min_size;
-
-		min_size = (height - 1) * mode_cmd->pitches[i]
-			 + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
-			 + mode_cmd->offsets[i];
-
-		if (bos[i]->size < min_size) {
+	if (is_modified) {
+		if (!kms->funcs->check_modified_format) {
+			dev_err(dev->dev, "can't check modified fb format\n");
 			ret = -EINVAL;
 			goto fail;
+		} else {
+			ret = kms->funcs->check_modified_format(
+				kms, msm_fb->format, mode_cmd, bos);
+			if (ret)
+				goto fail;
 		}
+	} else {
+		for (i = 0; i < num_planes; i++) {
+			unsigned int width = mode_cmd->width / (i ? hsub : 1);
+			unsigned int height = mode_cmd->height / (i ? vsub : 1);
+			unsigned int min_size;
+			unsigned int cpp;
 
-		msm_fb->base.obj[i] = bos[i];
+			cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
+
+			min_size = (height - 1) * mode_cmd->pitches[i]
+				 + width * cpp
+				 + mode_cmd->offsets[i];
+
+			if (bos[i]->size < min_size) {
+				ret = -EINVAL;
+				goto fail;
+			}
+		}
 	}
 
+	for (i = 0; i < num_planes; i++)
+		msm_fb->base.obj[i] = bos[i];
+
 	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
 
 	ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f59ca27..f6a8ea2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -19,12 +20,14 @@
 #include <linux/shmem_fs.h>
 #include <linux/dma-buf.h>
 #include <linux/pfn_t.h>
+#include <linux/ion.h>
 
 #include "msm_drv.h"
 #include "msm_fence.h"
 #include "msm_gem.h"
 #include "msm_gpu.h"
 #include "msm_mmu.h"
+#include "sde_dbg.h"
 
 static void msm_gem_vunmap_locked(struct drm_gem_object *obj);
 
@@ -77,6 +80,9 @@
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 
+	if (obj->import_attach)
+		return msm_obj->pages;
+
 	if (!msm_obj->pages) {
 		struct drm_device *dev = obj->dev;
 		struct page **p;
@@ -104,12 +110,13 @@
 			return ptr;
 		}
 
-		/* For non-cached buffers, ensure the new pages are clean
-		 * because display controller, GPU, etc. are not coherent:
+		/*
+		 * Make sure to flush the CPU cache for newly allocated memory
+		 * so we don't get ourselves into trouble with a dirty cache
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			dma_map_sg(dev->dev, msm_obj->sgt->sgl,
-					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 	}
 
 	return msm_obj->pages;
@@ -177,6 +184,24 @@
 	/* when we start tracking the pin count, then do something here */
 }
 
+void msm_gem_sync(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj;
+
+	if (!obj)
+		return;
+
+	msm_obj = to_msm_bo(obj);
+
+	/*
+	 * dma_sync_sg_for_device synchronises a single contiguous or
+	 * scatter/gather mapping for the CPU and device.
+	 */
+	dma_sync_sg_for_device(obj->dev->dev, msm_obj->sgt->sgl,
+		       msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+}
+
 int msm_gem_mmap_obj(struct drm_gem_object *obj,
 		struct vm_area_struct *vma)
 {
@@ -298,6 +323,25 @@
 	return offset;
 }
 
+dma_addr_t msm_gem_get_dma_addr(struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct sg_table *sgt;
+
+	if (!msm_obj->sgt) {
+		sgt = dma_buf_map_attachment(obj->import_attach,
+						DMA_BIDIRECTIONAL);
+		if (IS_ERR_OR_NULL(sgt)) {
+			DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
+					PTR_ERR(sgt));
+			return 0;
+		}
+		msm_obj->sgt = sgt;
+	}
+
+	return sg_phys(msm_obj->sgt->sgl);
+}
+
 static struct msm_gem_vma *add_vma(struct drm_gem_object *obj,
 		struct msm_gem_address_space *aspace)
 {
@@ -352,7 +396,14 @@
 	WARN_ON(!mutex_is_locked(&msm_obj->lock));
 
 	list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) {
-		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt);
+		msm_gem_unmap_vma(vma->aspace, vma, msm_obj->sgt,
+				msm_obj->flags);
+		/*
+		 * put_iova removes the domain connected to the obj which makes
+		 * the aspace inaccessible. Store the aspace, as it is used to
+		 * update the active_list during gem_free_obj and gem_purge.
+		 */
+		msm_obj->aspace = vma->aspace;
 		del_vma(vma);
 	}
 }
@@ -376,6 +427,51 @@
 
 	if (!vma) {
 		struct page **pages;
+		struct device *dev;
+		struct dma_buf *dmabuf;
+		bool reattach = false;
+
+		/*
+		 * Both secure/non-secure domains are attached with the default
+		 * device (non-sec) with dma_buf_attach during
+		 * msm_gem_prime_import. Detach and attach the correct device
+		 * to the dma_buf based on the aspace domain.
+		 */
+		dev = msm_gem_get_aspace_device(aspace);
+		if (dev && obj->import_attach &&
+				(dev != obj->import_attach->dev)) {
+			dmabuf = obj->import_attach->dmabuf;
+
+			DRM_DEBUG("detach nsec-dev:%pK attach sec-dev:%pK\n",
+					 obj->import_attach->dev, dev);
+			SDE_EVT32(obj->import_attach->dev, dev, msm_obj->sgt);
+
+			if (msm_obj->sgt)
+				dma_buf_unmap_attachment(obj->import_attach,
+							msm_obj->sgt,
+							DMA_BIDIRECTIONAL);
+			dma_buf_detach(dmabuf, obj->import_attach);
+
+			obj->import_attach = dma_buf_attach(dmabuf, dev);
+			if (IS_ERR(obj->import_attach)) {
+				DRM_ERROR("dma_buf_attach failure, err=%ld\n",
+						PTR_ERR(obj->import_attach));
+				goto unlock;
+			}
+			reattach = true;
+		}
+
+		/* perform delayed import for buffers without existing sgt */
+		if (((msm_obj->flags & MSM_BO_EXTBUF) && !(msm_obj->sgt))
+				|| reattach) {
+			ret = msm_gem_delayed_import(obj);
+			if (ret) {
+				DRM_ERROR("delayed dma-buf import failed %d\n",
+						ret);
+				goto unlock;
+			}
+		}
 
 		vma = add_vma(obj, aspace);
 		if (IS_ERR(vma)) {
@@ -390,13 +486,20 @@
 		}
 
 		ret = msm_gem_map_vma(aspace, vma, msm_obj->sgt,
-				obj->size >> PAGE_SHIFT);
+				obj->size >> PAGE_SHIFT,
+				msm_obj->flags);
 		if (ret)
 			goto fail;
 	}
 
 	*iova = vma->iova;
 
+	if (aspace &&  !msm_obj->in_active_list) {
+		mutex_lock(&aspace->list_lock);
+		msm_gem_add_obj_to_aspace_active_list(aspace, obj);
+		mutex_unlock(&aspace->list_lock);
+	}
+
 	mutex_unlock(&msm_obj->lock);
 	return 0;
 
@@ -435,6 +538,60 @@
 	// things that are no longer needed..
 }
 
+void msm_gem_aspace_domain_attach_detach_update(
+		struct msm_gem_address_space *aspace,
+		bool is_detach)
+{
+	struct msm_gem_object *msm_obj;
+	struct drm_gem_object *obj;
+	struct aspace_client *aclient;
+	int ret;
+	uint64_t iova;
+
+	if (!aspace)
+		return;
+
+	mutex_lock(&aspace->list_lock);
+	if (is_detach) {
+		/* Indicate to clients domain is getting detached */
+		list_for_each_entry(aclient, &aspace->clients, list) {
+			if (aclient->cb)
+				aclient->cb(aclient->cb_data,
+						is_detach);
+		}
+
+		/*
+		 * Unmap active buffers; clients should typically do this
+		 * when the callback is called, but it still needs to be
+		 * done here for the buffers which are not attached to any
+		 * planes.
+		 */
+		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
+			obj = &msm_obj->base;
+			if (obj->import_attach)
+				put_iova(obj);
+		}
+	} else {
+		/* map active buffers */
+		list_for_each_entry(msm_obj, &aspace->active_list, iova_list) {
+			obj = &msm_obj->base;
+			ret = msm_gem_get_iova(obj, aspace, &iova);
+			if (ret) {
+				mutex_unlock(&aspace->list_lock);
+				return;
+			}
+		}
+
+		/* Indicate to clients domain is attached */
+		list_for_each_entry(aclient, &aspace->clients, list) {
+			if (aclient->cb)
+				aclient->cb(aclient->cb_data,
+						is_detach);
+		}
+	}
+	mutex_unlock(&aspace->list_lock);
+}
+
 int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 		struct drm_mode_create_dumb *args)
 {
@@ -493,8 +650,20 @@
 			ret = PTR_ERR(pages);
 			goto fail;
 		}
-		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+
+		if (obj->import_attach) {
+			ret = dma_buf_begin_cpu_access(
+				obj->import_attach->dmabuf, DMA_BIDIRECTIONAL);
+			if (ret)
+				goto fail;
+
+			msm_obj->vaddr =
+				dma_buf_vmap(obj->import_attach->dmabuf);
+		} else {
+			msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
 				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+		}
+
 		if (msm_obj->vaddr == NULL) {
 			ret = -ENOMEM;
 			goto fail;
@@ -569,6 +738,12 @@
 	mutex_lock_nested(&msm_obj->lock, subclass);
 
 	put_iova(obj);
+	if (msm_obj->aspace) {
+		mutex_lock(&msm_obj->aspace->list_lock);
+		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
+				obj);
+		mutex_unlock(&msm_obj->aspace->list_lock);
+	}
 
 	msm_gem_vunmap_locked(obj);
 
@@ -601,7 +776,14 @@
 	if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
 		return;
 
-	vunmap(msm_obj->vaddr);
+	if (obj->import_attach) {
+		dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+		dma_buf_end_cpu_access(obj->import_attach->dmabuf,
+						DMA_BIDIRECTIONAL);
+	} else {
+		vunmap(msm_obj->vaddr);
+	}
+
 	msm_obj->vaddr = NULL;
 }
 
@@ -803,6 +985,12 @@
 	mutex_lock(&msm_obj->lock);
 
 	put_iova(obj);
+	if (msm_obj->aspace) {
+		mutex_lock(&msm_obj->aspace->list_lock);
+		msm_gem_remove_obj_from_aspace_active_list(msm_obj->aspace,
+				obj);
+		mutex_unlock(&msm_obj->aspace->list_lock);
+	}
 
 	if (obj->import_attach) {
 		if (msm_obj->vaddr)
@@ -887,6 +1075,9 @@
 
 	INIT_LIST_HEAD(&msm_obj->submit_entry);
 	INIT_LIST_HEAD(&msm_obj->vmas);
+	INIT_LIST_HEAD(&msm_obj->iova_list);
+	msm_obj->aspace = NULL;
+	msm_obj->in_active_list = false;
 
 	if (struct_mutex_locked) {
 		WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -980,13 +1171,61 @@
 	return _msm_gem_new(dev, size, flags, false);
 }
 
+int msm_gem_delayed_import(struct drm_gem_object *obj)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt;
+	struct msm_gem_object *msm_obj;
+	int ret = 0;
+
+	if (!obj) {
+		DRM_ERROR("NULL drm gem object\n");
+		return -EINVAL;
+	}
+
+	msm_obj = to_msm_bo(obj);
+
+	if (!obj->import_attach) {
+		DRM_ERROR("NULL dma_buf_attachment in drm gem object\n");
+		return -EINVAL;
+	}
+
+	attach = obj->import_attach;
+	attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
+
+	if (msm_obj->flags & MSM_BO_SKIPSYNC)
+		attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+	if (msm_obj->flags & MSM_BO_KEEPATTRS)
+		attach->dma_map_attrs |=
+				DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
+
+	/*
+	 * dma_buf_map_attachment will call dma_map_sg for ion buffer
+	 * mapping, and iova will get mapped when the function returns.
+	 */
+	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR(sgt)) {
+		ret = PTR_ERR(sgt);
+		DRM_ERROR("dma_buf_map_attachment failure, err=%d\n",
+				ret);
+		goto fail_import;
+	}
+	msm_obj->sgt = sgt;
+	msm_obj->pages = NULL;
+
+fail_import:
+	return ret;
+}
+
 struct drm_gem_object *msm_gem_import(struct drm_device *dev,
 		struct dma_buf *dmabuf, struct sg_table *sgt)
 {
 	struct msm_gem_object *msm_obj;
-	struct drm_gem_object *obj;
+	struct drm_gem_object *obj = NULL;
 	uint32_t size;
-	int ret, npages;
+	int ret;
+	unsigned long flags = 0;
 
 	/* if we don't have IOMMU, don't bother pretending we can import: */
 	if (!iommu_present(&platform_bus_type)) {
@@ -996,28 +1235,39 @@
 
 	size = PAGE_ALIGN(dmabuf->size);
 
-	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
+	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj,
+			false);
 	if (ret)
 		goto fail;
 
 	drm_gem_private_object_init(dev, obj, size);
 
-	npages = size / PAGE_SIZE;
-
 	msm_obj = to_msm_bo(obj);
 	mutex_lock(&msm_obj->lock);
 	msm_obj->sgt = sgt;
-	msm_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
-	if (!msm_obj->pages) {
-		mutex_unlock(&msm_obj->lock);
-		ret = -ENOMEM;
-		goto fail;
-	}
+	msm_obj->pages = NULL;
+	/*
+	 * 1) If sg table is NULL, user should call msm_gem_delayed_import
+	 * to add back the sg table to the drm gem object.
+	 *
+	 * 2) Add buffer flag unconditionally for all import cases.
+	 *    # Cached buffer will be attached immediately hence sgt will
+	 *      be available upon gem obj creation.
+	 *    # Un-cached buffer will follow delayed attach hence sgt
+	 *      will be NULL upon gem obj creation.
+	 */
+	msm_obj->flags |= MSM_BO_EXTBUF;
 
-	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
+	/*
+	 * For all uncached buffers, there is no need to perform cache
+	 * maintenance on dma map/unmap time.
+	 */
+	ret = dma_buf_get_flags(dmabuf, &flags);
 	if (ret) {
-		mutex_unlock(&msm_obj->lock);
-		goto fail;
+		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
+	} else if ((flags & ION_FLAG_CACHED) == 0) {
+		DRM_DEBUG("Buffer is uncached type\n");
+		msm_obj->flags |= MSM_BO_SKIPSYNC;
 	}
 
 	mutex_unlock(&msm_obj->lock);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index c5d9bd3..49a4412 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -24,6 +24,39 @@
 
 /* Additional internal-use only BO flags: */
 #define MSM_BO_STOLEN        0x10000000    /* try to use stolen/splash memory */
+#define MSM_BO_KEEPATTRS     0x20000000    /* keep h/w bus attributes */
+#define MSM_BO_SKIPSYNC      0x40000000    /* skip dmabuf cpu sync */
+#define MSM_BO_EXTBUF        0x80000000    /* indicate BO is an import buffer */
+
+struct msm_gem_object;
+
+struct msm_gem_aspace_ops {
+	int (*map)(struct msm_gem_address_space *space, struct msm_gem_vma *vma,
+		struct sg_table *sgt, int npages, unsigned int flags);
+
+	void (*unmap)(struct msm_gem_address_space *space,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		unsigned int flags);
+
+	void (*destroy)(struct msm_gem_address_space *space);
+	void (*add_to_active)(struct msm_gem_address_space *space,
+		struct msm_gem_object *obj);
+	void (*remove_from_active)(struct msm_gem_address_space *space,
+		struct msm_gem_object *obj);
+	int (*register_cb)(struct msm_gem_address_space *space,
+			void (*cb)(void *cb, bool data),
+			void *cb_data);
+	int (*unregister_cb)(struct msm_gem_address_space *space,
+			void (*cb)(void *cb, bool data),
+			void *cb_data);
+};
+
+struct aspace_client {
+	void (*cb)(void *cb, bool data);
+	void *cb_data;
+	struct list_head list;
+};
+
 
 struct msm_gem_address_space {
 	const char *name;
@@ -34,6 +67,14 @@
 	spinlock_t lock; /* Protects drm_mm node allocation/removal */
 	struct msm_mmu *mmu;
 	struct kref kref;
+	bool domain_attached;
+	const struct msm_gem_aspace_ops *ops;
+	struct drm_device *dev;
+	/* list of mapped objects */
+	struct list_head active_list;
+	/* list of clients */
+	struct list_head clients;
+	struct mutex list_lock; /* Protects active_list & clients */
 };
 
 struct msm_gem_vma {
@@ -91,6 +132,10 @@
 	 */
 	struct drm_mm_node *vram_node;
 	struct mutex lock; /* Protects resources associated with bo */
+	struct list_head iova_list;
+
+	struct msm_gem_address_space *aspace;
+	bool in_active_list;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 13403c6..c12fc7b 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -19,6 +20,7 @@
 #include "msm_gem.h"
 
 #include <linux/dma-buf.h>
+#include <linux/ion.h>
 
 struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
 {
@@ -77,3 +79,91 @@
 
 	return msm_obj->resv;
 }
+
+struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev,
+					    struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sgt = NULL;
+	struct drm_gem_object *obj;
+	struct device *attach_dev;
+	unsigned long flags = 0;
+	int ret;
+
+	if (!dma_buf)
+		return ERR_PTR(-EINVAL);
+
+	if (dma_buf->priv && !dma_buf->ops->begin_cpu_access) {
+		obj = dma_buf->priv;
+		if (obj->dev == dev) {
+			/*
+			 * Importing a dmabuf exported from our own gem
+			 * increases refcount on gem itself instead of f_count
+			 * of dmabuf.
+			 */
+			drm_gem_object_get(obj);
+			return obj;
+		}
+	}
+
+	if (!dev->driver->gem_prime_import_sg_table) {
+		DRM_ERROR("NULL gem_prime_import_sg_table\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	attach_dev = dev->dev;
+	attach = dma_buf_attach(dma_buf, attach_dev);
+	if (IS_ERR(attach)) {
+		DRM_ERROR("dma_buf_attach failure, err=%ld\n", PTR_ERR(attach));
+		return ERR_CAST(attach);
+	}
+
+	get_dma_buf(dma_buf);
+
+	/*
+	 * For cached buffers where CPU access is required,
+	 * dma_buf_map_attachment must be called now to allow user-space to
+	 * perform cpu sync begin/end; otherwise do delayed mapping during
+	 * the commit.
+	 */
+	ret = dma_buf_get_flags(dma_buf, &flags);
+	if (ret) {
+		DRM_ERROR("dma_buf_get_flags failure, err=%d\n", ret);
+		goto fail_put;
+	} else if (flags & ION_FLAG_CACHED) {
+		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
+		sgt = dma_buf_map_attachment(
+				attach, DMA_BIDIRECTIONAL);
+		if (IS_ERR(sgt)) {
+			ret = PTR_ERR(sgt);
+			DRM_ERROR(
+			"dma_buf_map_attachment failure, err=%d\n",
+				ret);
+			goto fail_detach;
+		}
+	}
+
+	/*
+	 * If importing a NULL sg table (i.e. for uncached buffers),
+	 * create a drm gem object with only the dma buf attachment.
+	 */
+	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		DRM_ERROR("gem_prime_import_sg_table failure, err=%d\n", ret);
+		goto fail_unmap;
+	}
+
+	obj->import_attach = attach;
+
+	return obj;
+
+fail_unmap:
+	if (sgt)
+		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+fail_put:
+	dma_buf_put(dma_buf);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index ffbec22..da9ae7a 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -1,4 +1,5 @@
 /*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2016 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
@@ -19,15 +20,184 @@
 #include "msm_gem.h"
 #include "msm_mmu.h"
 
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		unsigned int flags)
+{
+	if (!vma->iova)
+		return;
+
+	if (aspace) {
+		aspace->mmu->funcs->unmap_dma_buf(aspace->mmu, sgt,
+				DMA_BIDIRECTIONAL, flags);
+	}
+
+	vma->iova = 0;
+	msm_gem_address_space_put(aspace);
+}
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		int npages, unsigned int flags)
+{
+	int ret = -EINVAL;
+
+	if (!aspace || !aspace->domain_attached)
+		return ret;
+
+	ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt,
+			DMA_BIDIRECTIONAL, flags);
+	if (!ret)
+		vma->iova = sg_dma_address(sgt->sgl);
+
+	/* Get a reference to the aspace to keep it around */
+	kref_get(&aspace->kref);
+
+	return ret;
+}
+
+static void smmu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+	if (aspace->mmu)
+		aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static void smmu_aspace_add_to_active(
+		struct msm_gem_address_space *aspace,
+		struct msm_gem_object *msm_obj)
+{
+	WARN_ON(!mutex_is_locked(&aspace->list_lock));
+	list_move_tail(&msm_obj->iova_list, &aspace->active_list);
+}
+
+static void smmu_aspace_remove_from_active(
+		struct msm_gem_address_space *aspace,
+		struct msm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj, *next;
+
+	WARN_ON(!mutex_is_locked(&aspace->list_lock));
+
+	list_for_each_entry_safe(msm_obj, next, &aspace->active_list,
+			iova_list) {
+		if (msm_obj == obj) {
+			list_del(&msm_obj->iova_list);
+			break;
+		}
+	}
+}
+
+static int smmu_aspace_register_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	struct aspace_client *aclient = NULL;
+	struct aspace_client *temp;
+
+	if (!aspace)
+		return -EINVAL;
+
+	if (!aspace->domain_attached)
+		return -EACCES;
+
+	aclient = kzalloc(sizeof(*aclient), GFP_KERNEL);
+	if (!aclient)
+		return -ENOMEM;
+
+	aclient->cb = cb;
+	aclient->cb_data = cb_data;
+	INIT_LIST_HEAD(&aclient->list);
+
+	/* check if callback is already registered */
+	mutex_lock(&aspace->list_lock);
+	list_for_each_entry(temp, &aspace->clients, list) {
+		if ((temp->cb == aclient->cb) &&
+			(temp->cb_data == aclient->cb_data)) {
+			kfree(aclient);
+			mutex_unlock(&aspace->list_lock);
+			return -EEXIST;
+		}
+	}
+
+	list_move_tail(&aclient->list, &aspace->clients);
+	mutex_unlock(&aspace->list_lock);
+
+	return 0;
+}
+
+static int smmu_aspace_unregister_cb(
+		struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	struct aspace_client *aclient = NULL;
+	int rc = -ENOENT;
+
+	if (!aspace || !cb)
+		return -EINVAL;
+
+	mutex_lock(&aspace->list_lock);
+	list_for_each_entry(aclient, &aspace->clients, list) {
+		if ((aclient->cb == cb) &&
+			(aclient->cb_data == cb_data)) {
+			list_del(&aclient->list);
+			kfree(aclient);
+			rc = 0;
+			break;
+		}
+	}
+	mutex_unlock(&aspace->list_lock);
+
+	return rc;
+}
+
+static const struct msm_gem_aspace_ops smmu_aspace_ops = {
+	.map = smmu_aspace_map_vma,
+	.unmap = smmu_aspace_unmap_vma,
+	.destroy = smmu_aspace_destroy,
+	.add_to_active = smmu_aspace_add_to_active,
+	.remove_from_active = smmu_aspace_remove_from_active,
+	.register_cb = smmu_aspace_register_cb,
+	.unregister_cb = smmu_aspace_unregister_cb,
+};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct drm_device *dev, struct msm_mmu *mmu,
+		const char *name)
+{
+	struct msm_gem_address_space *aspace;
+
+	if (!mmu)
+		return ERR_PTR(-EINVAL);
+
+	aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+	if (!aspace)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&aspace->lock);
+	aspace->dev = dev;
+	aspace->name = name;
+	aspace->mmu = mmu;
+	aspace->ops = &smmu_aspace_ops;
+	INIT_LIST_HEAD(&aspace->active_list);
+	INIT_LIST_HEAD(&aspace->clients);
+	kref_init(&aspace->kref);
+	mutex_init(&aspace->list_lock);
+
+	return aspace;
+}
+
 static void
 msm_gem_address_space_destroy(struct kref *kref)
 {
 	struct msm_gem_address_space *aspace = container_of(kref,
 			struct msm_gem_address_space, kref);
 
-	drm_mm_takedown(&aspace->mm);
-	if (aspace->mmu)
-		aspace->mmu->funcs->destroy(aspace->mmu);
+	if (aspace && aspace->ops->destroy)
+		aspace->ops->destroy(aspace);
+
 	kfree(aspace);
 }
 
@@ -38,9 +208,10 @@
 		kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
-void
-msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt)
+/* GPU address space operations */
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		unsigned int flags)
 {
 	if (!aspace || !vma->iova)
 		return;
@@ -59,9 +230,19 @@
 	msm_gem_address_space_put(aspace);
 }
 
-int
-msm_gem_map_vma(struct msm_gem_address_space *aspace,
-		struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+void
+msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		unsigned int flags)
+{
+	if (aspace && aspace->ops->unmap)
+		aspace->ops->unmap(aspace, vma, sgt, flags);
+}
+
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt,
+		int npages, unsigned int flags)
 {
 	int ret;
 
@@ -91,6 +272,19 @@
 	return ret;
 }
 
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+	drm_mm_takedown(&aspace->mm);
+	if (aspace->mmu)
+		aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
+	.map = iommu_aspace_map_vma,
+	.unmap = iommu_aspace_unmap_vma,
+	.destroy = iommu_aspace_destroy,
+};
+
 struct msm_gem_address_space *
 msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
 		const char *name)
@@ -106,6 +300,7 @@
 	spin_lock_init(&aspace->lock);
 	aspace->name = name;
 	aspace->mmu = msm_iommu_new(dev, domain);
+	aspace->ops = &msm_iommu_aspace_ops;
 
 	drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
 		size >> PAGE_SHIFT);
@@ -114,3 +309,65 @@
 
 	return aspace;
 }
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+		struct msm_gem_vma *vma, struct sg_table *sgt, int npages,
+		unsigned int flags)
+{
+	if (aspace && aspace->ops->map)
+		return aspace->ops->map(aspace, vma, sgt, npages, flags);
+
+	return -EINVAL;
+}
+
+struct device *msm_gem_get_aspace_device(struct msm_gem_address_space *aspace)
+{
+	struct device *client_dev = NULL;
+
+	if (aspace && aspace->mmu && aspace->mmu->funcs->get_dev)
+		client_dev = aspace->mmu->funcs->get_dev(aspace->mmu);
+
+	return client_dev;
+}
+
+void msm_gem_add_obj_to_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (aspace && aspace->ops && aspace->ops->add_to_active)
+		aspace->ops->add_to_active(aspace, msm_obj);
+}
+
+void msm_gem_remove_obj_from_aspace_active_list(
+		struct msm_gem_address_space *aspace,
+		struct drm_gem_object *obj)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	if (aspace && aspace->ops && aspace->ops->remove_from_active)
+		aspace->ops->remove_from_active(aspace, msm_obj);
+}
+
+int msm_gem_address_space_register_cb(struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	if (aspace && aspace->ops && aspace->ops->register_cb)
+		return aspace->ops->register_cb(aspace, cb, cb_data);
+
+	return -EINVAL;
+}
+
+int msm_gem_address_space_unregister_cb(struct msm_gem_address_space *aspace,
+		void (*cb)(void *, bool),
+		void *cb_data)
+{
+	if (aspace && aspace->ops && aspace->ops->unregister_cb)
+		return aspace->ops->unregister_cb(aspace, cb, cb_data);
+
+	return -EINVAL;
+}
+
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 93db1f5..dacb1c5 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
@@ -27,6 +26,19 @@
 
 #define MAX_PLANE	4
 
+/**
+ * Device Private DRM Mode Flags
+ * drm_mode->private_flags
+ */
+/* Connector has interpreted seamless transition request as dynamic fps */
+#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS	(1<<0)
+/* Transition to new mode requires a wait-for-vblank before the modeset */
+#define MSM_MODE_FLAG_VBLANK_PRE_MODESET	(1<<1)
+/* Request to switch the connector mode */
+#define MSM_MODE_FLAG_SEAMLESS_DMS			(1<<2)
+/* Request to switch the fps */
+#define MSM_MODE_FLAG_SEAMLESS_VRR			(1<<3)
+
 /* As there are different display controller blocks depending on the
  * snapdragon version, the kms support is split out and the appropriate
  * implementation is loaded at runtime.  The kms module is responsible
@@ -35,6 +47,7 @@
 struct msm_kms_funcs {
 	/* hw initialization: */
 	int (*hw_init)(struct msm_kms *kms);
+	int (*postinit)(struct msm_kms *kms);
 	/* irq handling: */
 	void (*irq_preinstall)(struct msm_kms *kms);
 	int (*irq_postinstall)(struct msm_kms *kms);
@@ -43,15 +56,31 @@
 	int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
 	/* modeset, bracketing atomic_commit(): */
-	void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
-	void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+	void (*prepare_fence)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
+	void (*prepare_commit)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
+	void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+	void (*complete_commit)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
 	/* functions to wait for atomic commit completed on each CRTC */
 	void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
 					struct drm_crtc *crtc);
+	/* function pointer to wait for pixel transfer to panel to complete */
+	void (*wait_for_tx_complete)(struct msm_kms *kms,
+					struct drm_crtc *crtc);
 	/* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
 	const struct msm_format *(*get_format)(struct msm_kms *kms,
 					const uint32_t format,
-					const uint64_t modifiers);
+					const uint64_t modifier);
+	/* do format checking on format modified through fb_cmd2 modifiers */
+	int (*check_modified_format)(const struct msm_kms *kms,
+			const struct msm_format *msm_fmt,
+			const struct drm_mode_fb_cmd2 *cmd,
+			struct drm_gem_object **bos);
+	/* perform complete atomic check of given atomic state */
+	int (*atomic_check)(struct msm_kms *kms,
+			struct drm_atomic_state *state);
 	/* misc: */
 	long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
 			struct drm_encoder *encoder);
@@ -59,15 +88,33 @@
 			struct drm_encoder *encoder,
 			struct drm_encoder *slave_encoder,
 			bool is_cmd_mode);
+	void (*postopen)(struct msm_kms *kms, struct drm_file *file);
+	void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+	void (*postclose)(struct msm_kms *kms, struct drm_file *file);
+	void (*lastclose)(struct msm_kms *kms,
+			struct drm_modeset_acquire_ctx *ctx);
+	int (*register_events)(struct msm_kms *kms,
+			struct drm_mode_object *obj, u32 event, bool en);
 	void (*set_encoder_mode)(struct msm_kms *kms,
 				 struct drm_encoder *encoder,
 				 bool cmd_mode);
+	/* pm suspend/resume hooks */
+	int (*pm_suspend)(struct device *dev);
+	int (*pm_resume)(struct device *dev);
 	/* cleanup: */
 	void (*destroy)(struct msm_kms *kms);
+	/* get address space */
+	struct msm_gem_address_space *(*get_address_space)(
+			struct msm_kms *kms,
+			unsigned int domain);
 #ifdef CONFIG_DEBUG_FS
 	/* debugfs: */
 	int (*debugfs_init)(struct msm_kms *kms, struct drm_minor *minor);
 #endif
+	/* handle continuous splash */
+	int (*cont_splash_config)(struct msm_kms *kms);
+	/* check for continuous splash status */
+	bool (*check_for_splash)(struct msm_kms *kms);
 };
 
 struct msm_kms {
@@ -80,6 +127,18 @@
 	struct msm_gem_address_space *aspace;
 };
 
+/**
+ * Subclass of drm_atomic_state, to allow the kms backend to have
+ * driver-private global state.  The kms backend can do whatever it wants
+ * with the ->state ptr.  On ->atomic_state_clear() the ->state ptr
+ * is kfree'd and set back to NULL.
+ */
+struct msm_kms_state {
+	struct drm_atomic_state base;
+	void *state;
+};
+#define to_kms_state(x) container_of(x, struct msm_kms_state, base)
+
 static inline void msm_kms_init(struct msm_kms *kms,
 		const struct msm_kms_funcs *funcs)
 {
@@ -96,6 +155,7 @@
 struct msm_kms *mdp5_kms_init(struct drm_device *dev);
 int msm_mdss_init(struct drm_device *dev);
 void msm_mdss_destroy(struct drm_device *dev);
 int msm_mdss_enable(struct msm_mdss *mdss);
 int msm_mdss_disable(struct msm_mdss *mdss);
 #else
@@ -119,4 +179,39 @@
 	return 0;
 }
 #endif
+
+struct msm_kms *sde_kms_init(struct drm_device *dev);
+
+/**
+ * Mode Set Utility Functions
+ */
+static inline bool msm_is_mode_seamless(const struct drm_display_mode *mode)
+{
+	return (mode->flags & DRM_MODE_FLAG_SEAMLESS);
+}
+
+static inline bool msm_is_mode_seamless_dms(const struct drm_display_mode *mode)
+{
+	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DMS)
+		: false;
+}
+
+static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode)
+{
+	return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) &&
+		(mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS));
+}
+
+static inline bool msm_is_mode_seamless_vrr(const struct drm_display_mode *mode)
+{
+	return mode ? (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_VRR)
+		: false;
+}
+
+static inline bool msm_needs_vblank_pre_modeset(
+		const struct drm_display_mode *mode)
+{
+	return (mode->private_flags & MSM_MODE_FLAG_VBLANK_PRE_MODESET);
+}
 #endif /* __MSM_KMS_H__ */
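
As a hedged sketch of the msm_kms_state pattern added above, a kms backend
could recover its private global state from a generic atomic state pointer
via to_kms_state(); the backend state struct below is hypothetical:

struct my_backend_state {		/* hypothetical private state */
	int pending_commits;
};

static struct my_backend_state *
my_backend_get_state(struct drm_atomic_state *state)
{
	struct msm_kms_state *kms_state = to_kms_state(state);

	/* ->state is owned by the backend; NULL until first allocated */
	return kms_state->state;
}
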
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index aa2c5d4..ee6cbcd 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -20,14 +20,40 @@
 
 #include <linux/iommu.h>
 
+struct msm_mmu;
+
+enum msm_mmu_domain_type {
+	MSM_SMMU_DOMAIN_UNSECURE,
+	MSM_SMMU_DOMAIN_NRT_UNSECURE,
+	MSM_SMMU_DOMAIN_SECURE,
+	MSM_SMMU_DOMAIN_NRT_SECURE,
+	MSM_SMMU_DOMAIN_MAX,
+};
+
 struct msm_mmu_funcs {
 	int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
-			unsigned len, int prot);
+			unsigned int len, int prot);
 	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
-			unsigned len);
+			unsigned int len);
+	int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+			enum dma_data_direction dir);
+	void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
+		enum dma_data_direction dir);
+	int (*map_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+			int dir, u32 flags);
+	void (*unmap_dma_buf)(struct msm_mmu *mmu, struct sg_table *sgt,
+			int dir, u32 flags);
 	void (*destroy)(struct msm_mmu *mmu);
+	bool (*is_domain_secure)(struct msm_mmu *mmu);
+	int (*set_attribute)(struct msm_mmu *mmu,
+			enum iommu_attr attr, void *data);
+	int (*one_to_one_map)(struct msm_mmu *mmu, uint32_t iova,
+			uint32_t dest_address, uint32_t size, int prot);
+	int (*one_to_one_unmap)(struct msm_mmu *mmu, uint32_t dest_address,
+					uint32_t size);
+	struct device *(*get_dev)(struct msm_mmu *mmu);
 };
 
 struct msm_mmu {
@@ -45,7 +71,8 @@
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+struct msm_mmu *msm_smmu_new(struct device *dev,
+	enum msm_mmu_domain_type domain);
 
 static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
 		int (*handler)(void *arg, unsigned long iova, int flags))
@@ -54,4 +81,8 @@
 	mmu->handler = handler;
 }
 
+/* SDE SMMU driver initialization and cleanup functions */
+int __init msm_smmu_driver_init(void);
+void __exit msm_smmu_driver_cleanup(void);
+
 #endif /* __MSM_MMU_H__ */
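
A minimal sketch of creating per-domain MMU instances through the
msm_smmu_new() interface declared above, following the usual ERR_PTR
convention (the surrounding device pointer is assumed valid):

struct msm_mmu *mmu, *sec_mmu;

mmu = msm_smmu_new(dev, MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu))
	return PTR_ERR(mmu);

sec_mmu = msm_smmu_new(dev, MSM_SMMU_DOMAIN_SECURE);
if (IS_ERR(sec_mmu)) {
	mmu->funcs->destroy(mmu);
	return PTR_ERR(sec_mmu);
}
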
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index f18885d..b7ed1ab 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
@@ -174,13 +173,12 @@
 {
 	struct msm_smmu *smmu = to_msm_smmu(mmu);
 	struct msm_smmu_client *client = msm_smmu_to_client(smmu);
-	size_t ret;
-
-	ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl,
-			sgt->nents, prot);
-	WARN_ON((int)ret < 0);
+	size_t ret = 0;
 
 	if (sgt && sgt->sgl) {
+		ret = iommu_map_sg(client->mmu_mapping->domain, iova, sgt->sgl,
+				sgt->nents, prot);
+		WARN_ON((int)ret < 0);
 		DRM_DEBUG("%pad/0x%x/0x%x/\n", &sgt->sgl->dma_address,
 				sgt->sgl->dma_length, prot);
 		SDE_EVT32(sgt->sgl->dma_address, sgt->sgl->dma_length,
@@ -409,11 +407,19 @@
 {
 	struct msm_smmu *smmu;
 	struct device *client_dev;
+	bool smmu_full_map;
 
 	smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
 	if (!smmu)
 		return ERR_PTR(-ENOMEM);
 
+	smmu_full_map = of_property_read_bool(dev->of_node,
+					"qcom,fullsize-va-map");
+	if (smmu_full_map) {
+		msm_smmu_domains[domain].va_start = SZ_128K;
+		msm_smmu_domains[domain].va_size = SZ_4G - SZ_128K;
+	}
+
 	client_dev = msm_smmu_device_create(dev, domain, smmu);
 	if (IS_ERR(client_dev)) {
 		kfree(smmu);
@@ -490,6 +496,13 @@
 		}
 	}
 
+	if (!client->dev->dma_parms)
+		client->dev->dma_parms = devm_kzalloc(client->dev,
+				sizeof(*client->dev->dma_parms), GFP_KERNEL);
+
+	dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
+	dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));
+
 	iommu_set_fault_handler(client->mmu_mapping->domain,
 			msm_smmu_fault_handler, (void *)client);
 
@@ -566,6 +579,7 @@
 	.driver = {
 		.name = "msmdrm_smmu",
 		.of_match_table = msm_smmu_dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
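
Note that the dma_parms hunk above does not check the devm_kzalloc() return
value before calling the dma_set_* helpers; a more defensive variant of the
same sequence (not what the patch does) would be:

if (!client->dev->dma_parms) {
	client->dev->dma_parms = devm_kzalloc(client->dev,
			sizeof(*client->dev->dma_parms), GFP_KERNEL);
	if (!client->dev->dma_parms)
		return -ENOMEM;
}

dma_set_max_seg_size(client->dev, DMA_BIT_MASK(32));
dma_set_seg_boundary(client->dev, DMA_BIT_MASK(64));
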
 
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 6aa23e1..3a1b6c2 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1099,7 +1099,7 @@
 
 		/* convert fb val to drm framebuffer and prepare it */
 		c_state->out_fb =
-			drm_framebuffer_lookup(connector->dev, val);
+			drm_framebuffer_lookup(connector->dev, NULL, val);
 		if (!c_state->out_fb && val) {
 			SDE_ERROR("failed to look up fb %lld\n", val);
 			rc = -EFAULT;
@@ -2129,7 +2129,7 @@
 
 	mutex_init(&c_conn->lock);
 
-	rc = drm_mode_connector_attach_encoder(&c_conn->base, encoder);
+	rc = drm_connector_attach_encoder(&c_conn->base, encoder);
 	if (rc) {
 		SDE_ERROR("failed to attach encoder to connector, %d\n", rc);
 		goto error_cleanup_fence;
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index ddc8a45..1630b09 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -83,7 +83,8 @@
 			!sde_kms->hw_intr->ops.irq_idx_lookup)
 		return -EINVAL;
 
-	return sde_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+	return sde_kms->hw_intr->ops.irq_idx_lookup(
+			sde_kms->hw_intr, intr_type,
 			instance_idx);
 }
 
@@ -104,7 +105,7 @@
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
 		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -171,7 +172,7 @@
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
 		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -231,7 +232,7 @@
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
 		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -304,7 +305,7 @@
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
 		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -339,7 +340,7 @@
 		return -EINVAL;
 	}
 
-	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->sde_irq_map_size) {
 		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -482,7 +483,7 @@
 	spin_lock_init(&sde_kms->irq_obj.cb_lock);
 
 	/* Create irq callbacks for all possible irq_idx */
-	sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size;
+	sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->sde_irq_map_size;
 	sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
 			sizeof(struct list_head), GFP_KERNEL);
 	sde_kms->irq_obj.enable_counts = kcalloc(sde_kms->irq_obj.total_irqs,
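
The bounds check against hw_intr->sde_irq_map_size now repeats in every hunk
above; a hypothetical helper consolidating it (not part of this patch) could
read:

static inline bool sde_core_irq_idx_valid(struct sde_kms *sde_kms, int irq_idx)
{
	return irq_idx >= 0 && irq_idx < sde_kms->hw_intr->sde_irq_map_size;
}
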
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 82eb0eb..5fd5531 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
@@ -1212,7 +1211,7 @@
 	is_crtc_roi_dirty = sde_crtc_is_crtc_roi_dirty(state);
 	is_any_conn_roi_dirty = false;
 
-	for_each_connector_in_state(state->state, conn, conn_state, i) {
+	for_each_new_connector_in_state(state->state, conn, conn_state, i) {
 		struct sde_connector *sde_conn;
 		struct sde_connector_state *sde_conn_state;
 		struct sde_rect conn_roi;
@@ -1302,7 +1301,7 @@
 		return 0;
 
 	/* partial update active, check if autorefresh is also requested */
-	for_each_connector_in_state(state->state, conn, conn_state, i) {
+	for_each_new_connector_in_state(state->state, conn, conn_state, i) {
 		uint64_t autorefresh;
 
 		if (!conn_state || conn_state->crtc != crtc)
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index c029740..3ffe728 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 8bb6c88..00159f0 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
@@ -1895,7 +1894,8 @@
 			SDE_EVTLOG_FUNC_ENTRY);
 
 	if (crtc->base.id != wait_vblank_crtc_id) {
-		primary_crtc = drm_crtc_find(drm_enc->dev, wait_vblank_crtc_id);
+		primary_crtc = drm_crtc_find(drm_enc->dev,
+					NULL, wait_vblank_crtc_id);
 		if (!primary_crtc) {
 			SDE_ERROR_ENC(sde_enc,
 					"failed to find primary crtc id %d\n",
@@ -4125,24 +4125,27 @@
 	return 0;
 }
 
-static void sde_encoder_vsync_event_handler(unsigned long data)
+static void sde_encoder_vsync_event_handler(struct timer_list *t)
 {
-	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
-	struct sde_encoder_virt *sde_enc;
+	struct drm_encoder *drm_enc;
+	struct sde_encoder_virt *sde_enc =
+			from_timer(sde_enc, t, vsync_event_timer);
 	struct msm_drm_private *priv;
 	struct msm_drm_thread *event_thread;
 
+	if (!sde_enc || !sde_enc->crtc) {
+		SDE_ERROR("invalid encoder parameters %d\n", !sde_enc);
+		return;
+	}
+
+	drm_enc = &sde_enc->base;
+
 	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
 		SDE_ERROR("invalid encoder parameters\n");
 		return;
 	}
 
-	sde_enc = to_sde_encoder_virt(drm_enc);
 	priv = drm_enc->dev->dev_private;
-	if (!sde_enc->crtc) {
-		SDE_ERROR("invalid crtc");
-		return;
-	}
 
 	if (sde_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
 		SDE_ERROR("invalid crtc index:%u\n",
@@ -5123,9 +5126,8 @@
 
 	if ((disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) &&
 			disp_info->is_primary)
-		setup_timer(&sde_enc->vsync_event_timer,
-				sde_encoder_vsync_event_handler,
-				(unsigned long)sde_enc);
+		timer_setup(&sde_enc->vsync_event_timer,
+				sde_encoder_vsync_event_handler, 0);
 
 	snprintf(name, SDE_NAME_SIZE, "rsc_enc%u", drm_enc->base.id);
 	sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
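
The vsync timer hunks above follow the standard timer API conversion: the
callback receives the timer_list pointer and recovers its container with
from_timer() instead of casting an unsigned long. A minimal sketch of the
pattern in isolation (struct and field names are illustrative):

#include <linux/timer.h>

struct my_ctx {				/* illustrative container */
	struct timer_list timer;
};

static void my_timeout(struct timer_list *t)
{
	struct my_ctx *ctx = from_timer(ctx, t, timer);

	/* ... use ctx here ... */
}

static void my_ctx_init(struct my_ctx *ctx)
{
	/* replaces setup_timer(&ctx->timer, fn, (unsigned long)ctx) */
	timer_setup(&ctx->timer, my_timeout, 0);
}
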
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
index e89c0fb..7c4518f 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index bcaeb3d..70ea28f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -2140,7 +2140,7 @@
 
 		pdev = of_find_device_by_node(phargs.np);
 		if (pdev) {
-			slice = llcc_slice_getd(&pdev->dev, "rotator");
+			slice = llcc_slice_getd(pdev->id);
 			if (IS_ERR_OR_NULL(slice)) {
 				rot->pdev = NULL;
 				SDE_ERROR("failed to get system cache %ld\n",
@@ -3559,13 +3559,16 @@
 
 static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
 {
-	int rc = 0;
+	int i, rc = 0;
 
 	if (!sde_cfg)
 		return -EINVAL;
 
 	rc = sde_hardware_format_caps(sde_cfg, hw_rev);
 
+	for (i = 0; i < MDSS_INTR_MAX; i++)
+		set_bit(i, sde_cfg->mdss_irqs);
+
 	if (IS_MSM8996_TARGET(hw_rev)) {
 		sde_cfg->perf.min_prefill_lines = 21;
 	} else if (IS_MSM8998_TARGET(hw_rev)) {
@@ -3617,6 +3620,19 @@
 		sde_cfg->sui_ns_allowed = true;
 		sde_cfg->sui_misr_supported = true;
 		sde_cfg->sui_block_xin_mask = 0x2EE1;
+	} else if (IS_KONA_TARGET(hw_rev)) {
+		sde_cfg->has_cwb_support = true;
+		sde_cfg->has_wb_ubwc = true;
+		sde_cfg->has_qsync = true;
+		sde_cfg->perf.min_prefill_lines = 24;
+		sde_cfg->vbif_qos_nlvl = 8;
+		sde_cfg->ts_prefill_rev = 2;
+		sde_cfg->ctl_rev = SDE_CTL_CFG_VERSION_1_0_0;
+		sde_cfg->delay_prg_fetch_start = true;
+		sde_cfg->sui_ns_allowed = true;
+		sde_cfg->sui_misr_supported = true;
+		sde_cfg->sui_block_xin_mask = 0x3F71;
+		sde_cfg->has_3d_merge_reset = true;
 	} else {
 		SDE_ERROR("unsupported chipset id:%X\n", hw_rev);
 		sde_cfg->perf.min_prefill_lines = 0xffff;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 31a4ea3..b3d94c4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -47,6 +47,7 @@
 #define SDE_HW_VER_501	SDE_HW_VER(5, 0, 1) /* sm8150 v2.0 */
 #define SDE_HW_VER_510	SDE_HW_VER(5, 1, 0) /* sdmshrike v1.0 */
 #define SDE_HW_VER_530	SDE_HW_VER(5, 3, 0) /* sm6150 v1.0 */
+#define SDE_HW_VER_600	SDE_HW_VER(6, 0, 0) /* kona */
 
 #define IS_MSM8996_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_170)
 #define IS_MSM8998_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_300)
@@ -55,6 +56,7 @@
 #define IS_SM8150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_500)
 #define IS_SDMSHRIKE_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_510)
 #define IS_SM6150_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_530)
+#define IS_KONA_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_600)
 
 #define SDE_HW_BLK_NAME_LEN	16
 
@@ -99,6 +101,27 @@
 		IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_UBWC_VER_30)
 
 /**
+ * SDE INTERRUPTS - lists the possible hw irqs allowed by the HW.
+ * The order in this enum must match the order of the irqs defined
+ * by 'sde_irq_map'.
+ */
+enum sde_intr_enum {
+	MDSS_INTR_SSPP_TOP0_INTR,
+	MDSS_INTR_SSPP_TOP0_INTR2,
+	MDSS_INTF_TEAR_1_INTR,
+	MDSS_INTF_TEAR_2_INTR,
+	MDSS_INTR_SSPP_TOP0_HIST_INTR,
+	MDSS_INTR_INTF_0_INTR,
+	MDSS_INTR_INTF_1_INTR,
+	MDSS_INTR_INTF_2_INTR,
+	MDSS_INTR_INTF_3_INTR,
+	MDSS_INTR_INTF_4_INTR,
+	MDSS_INTR_AD4_0_INTR,
+	MDSS_INTR_AD4_1_INTR,
+	MDSS_INTR_MAX
+};
+
+/**
  * MDP TOP BLOCK features
  * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be be done per pipe
  * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
@@ -1060,6 +1083,7 @@
  * @sui_ns_allowed      flag to indicate non-secure context banks are allowed
  *                         during secure-ui session
  * @sui_supported_blendstage  secure-ui supported blendstage
+ * @mdss_irqs	  bitmap with the irqs supported by the target
  */
 struct sde_mdss_cfg {
 	u32 hwversion;
@@ -1167,6 +1191,8 @@
 	struct sde_format_extended *vig_formats;
 	struct sde_format_extended *wb_formats;
 	struct sde_format_extended *virt_vig_formats;
+
+	DECLARE_BITMAP(mdss_irqs, MDSS_INTR_MAX);
 };
 
 struct sde_mdss_hw_cfg_handler {
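
A short sketch of how the new mdss_irqs bitmap is meant to be consumed:
_sde_hardware_pre_caps() first sets every bit (as in the hunk above), and an
interrupt layer can then filter on the bits a target actually supports; the
helper below is illustrative, not part of this patch:

static bool intf1_irq_supported(const struct sde_mdss_cfg *sde_cfg)
{
	return test_bit(MDSS_INTR_INTF_1_INTR, sde_cfg->mdss_irqs);
}
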
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 7c986a6..46f3ab7 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -194,11 +194,18 @@
  * @clr_off:	offset to CLEAR reg
  * @en_off:	offset to ENABLE reg
  * @status_off:	offset to STATUS reg
+ * @sde_irq_idx:	global index in the 'sde_irq_map' table,
+ *		to know which interrupt type, instance, mask, etc. to use
+ * @map_idx_start:	first index in the sde_irq_map table
+ * @map_idx_end:	last index in the sde_irq_map table
  */
 struct sde_intr_reg {
 	u32 clr_off;
 	u32 en_off;
 	u32 status_off;
+	int sde_irq_idx;
+	u32 map_idx_start;
+	u32 map_idx_end;
 };
 
 /**
@@ -206,697 +213,333 @@
  * @intr_type:		type of interrupt listed in sde_intr_type
  * @instance_idx:	instance index of the associated HW block in SDE
  * @irq_mask:		corresponding bit in the interrupt status reg
- * @reg_idx:		which reg set to use
+ * @reg_idx:		index in the 'sde_irq_tbl' table, to know which
+ *		register offsets to use. -1 = invalid offset
  */
 struct sde_irq_type {
 	u32 intr_type;
 	u32 instance_idx;
 	u32 irq_mask;
-	u32 reg_idx;
+	int reg_idx;
 };
 
 /**
- * List of SDE interrupt registers
- */
-static const struct sde_intr_reg sde_intr_set[] = {
-	{
-		MDP_SSPP_TOP0_OFF+INTR_CLEAR,
-		MDP_SSPP_TOP0_OFF+INTR_EN,
-		MDP_SSPP_TOP0_OFF+INTR_STATUS
-	},
-	{
-		MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
-		MDP_SSPP_TOP0_OFF+INTR2_EN,
-		MDP_SSPP_TOP0_OFF+INTR2_STATUS
-	},
-	{
-		MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
-		MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
-		MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
-	},
-	{
-		MDP_INTF_0_OFF+INTF_INTR_CLEAR,
-		MDP_INTF_0_OFF+INTF_INTR_EN,
-		MDP_INTF_0_OFF+INTF_INTR_STATUS
-	},
-	{
-		MDP_INTF_1_OFF+INTF_INTR_CLEAR,
-		MDP_INTF_1_OFF+INTF_INTR_EN,
-		MDP_INTF_1_OFF+INTF_INTR_STATUS
-	},
-	{
-		MDP_INTF_2_OFF+INTF_INTR_CLEAR,
-		MDP_INTF_2_OFF+INTF_INTR_EN,
-		MDP_INTF_2_OFF+INTF_INTR_STATUS
-	},
-	{
-		MDP_INTF_3_OFF+INTF_INTR_CLEAR,
-		MDP_INTF_3_OFF+INTF_INTR_EN,
-		MDP_INTF_3_OFF+INTF_INTR_STATUS
-	},
-	{
-		MDP_INTF_4_OFF+INTF_INTR_CLEAR,
-		MDP_INTF_4_OFF+INTF_INTR_EN,
-		MDP_INTF_4_OFF+INTF_INTR_STATUS
-	},
-	{
-		MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
-		MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
-		MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
-	},
-	{
-		MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
-		MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
-		MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
-	},
-	{
-		MDP_INTF_TEAR_INTF_1_IRQ_OFF + MDP_INTF_TEAR_INTR_CLEAR_OFF,
-		MDP_INTF_TEAR_INTF_1_IRQ_OFF + MDP_INTF_TEAR_INTR_EN_OFF,
-		MDP_INTF_TEAR_INTF_1_IRQ_OFF + MDP_INTF_TEAR_INTR_STATUS_OFF,
-	},
-	{
-		MDP_INTF_TEAR_INTF_2_IRQ_OFF + MDP_INTF_TEAR_INTR_CLEAR_OFF,
-		MDP_INTF_TEAR_INTF_2_IRQ_OFF + MDP_INTF_TEAR_INTR_EN_OFF,
-		MDP_INTF_TEAR_INTF_2_IRQ_OFF + MDP_INTF_TEAR_INTR_STATUS_OFF,
-	}
-};
-
-/**
- * IRQ mapping table - use for lookup an irq_idx in this table that have
+ * IRQ mapping tables - used to look up an irq_idx in these tables that has
  *                     a matching interface type and instance index.
+ * Each of these tables is copied into a dynamically allocated
+ * table that will be used to service each of the irqs.
  */
-static const struct sde_irq_type sde_irq_map[] = {
-	/* BEGIN MAP_RANGE: 0-31, INTR */
-	/* irq_idx: 0-3 */
-	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, 0},
+static struct sde_irq_type sde_irq_intr_map[] = {
+
+	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, -1},
 	{ SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, 0},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, 0},
-	/* irq_idx: 4-7 */
-	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, 0},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, 0},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, 0},
-	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, 0},
-	/* irq_idx: 8-11 */
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, -1},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, -1},
+
+	{ SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, -1},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, -1},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, -1},
+	{ SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, -1},
+
 	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_DONE, 0},
+		SDE_INTR_PING_PONG_0_DONE, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_DONE, 0},
+		SDE_INTR_PING_PONG_1_DONE, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_DONE, 0},
+		SDE_INTR_PING_PONG_2_DONE, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_DONE, 0},
-	/* irq_idx: 12-15 */
+		SDE_INTR_PING_PONG_3_DONE, -1},
+
 	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_RD_PTR, 0},
+		SDE_INTR_PING_PONG_0_RD_PTR, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_RD_PTR, 0},
+		SDE_INTR_PING_PONG_1_RD_PTR, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_RD_PTR, 0},
+		SDE_INTR_PING_PONG_2_RD_PTR, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_RD_PTR, 0},
-	/* irq_idx: 16-19 */
+		SDE_INTR_PING_PONG_3_RD_PTR, -1},
+
 	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_WR_PTR, 0},
+		SDE_INTR_PING_PONG_0_WR_PTR, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_WR_PTR, 0},
+		SDE_INTR_PING_PONG_1_WR_PTR, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_WR_PTR, 0},
+		SDE_INTR_PING_PONG_2_WR_PTR, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_WR_PTR, 0},
-	/* irq_idx: 20-23 */
+		SDE_INTR_PING_PONG_3_WR_PTR, -1},
+
 	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+		SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+		SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+		SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, -1},
 	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
-	/* irq_idx: 24-27 */
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, 0},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, 0},
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, 0},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, 0},
-	/* irq_idx: 28-31 */
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, 0},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, 0},
-	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, 0},
-	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, 0},
+		SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, -1},
 
-	/* BEGIN MAP_RANGE: 32-64, INTR2 */
-	/* irq_idx: 32-35 */
-	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	/* irq_idx: 36-39 */
-	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_WR_PTR, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	/* irq_idx: 40 */
-	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_RD_PTR, 1},
-	/* irq_idx: 41-46 */
-	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
-		SDE_INTR_CTL_0_START, 1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
-		SDE_INTR_CTL_1_START, 1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
-		SDE_INTR_CTL_2_START, 1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
-		SDE_INTR_CTL_3_START, 1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
-		SDE_INTR_CTL_4_START, 1},
-	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
-		SDE_INTR_CTL_5_START, 1},
-	/* irq_idx: 47-48 */
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
-	/* irq_idx: 49-52 */
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_TEAR_DETECTED, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_TEAR_DETECTED, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1},
-	/* irq_idx: 53-54 */
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, 1},
-	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, 1},
-	/* irq_idx: 55 */
-	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
-	/* irq_idx: 56-59 */
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
-		SDE_INTR_PING_PONG_0_TE_DETECTED, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
-		SDE_INTR_PING_PONG_1_TE_DETECTED, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
-		SDE_INTR_PING_PONG_2_TE_DETECTED, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
-		SDE_INTR_PING_PONG_3_TE_DETECTED, 1},
-	/* irq_idx: 60-63 */
-	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
-		SDE_INTR_PING_PONG_S0_TE_DETECTED, 1},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
-		SDE_INTR_PING_PONG_4_DONE, 1},
-	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
-		SDE_INTR_PING_PONG_5_DONE, 1},
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, -1},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, -1},
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, -1},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, -1},
 
-	/* BEGIN MAP_RANGE: 64-95 HIST */
-	/* irq_idx: 64-67 */
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
-		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 68-71 */
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
-		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 72-75 */
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
-		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
-		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
-	/* irq_idx: 76-79 */
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
-		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 80-83 */
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
-		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 84-87 */
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
-		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
-	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
-		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
-	/* irq_idx: 88-91 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	/* irq_idx: 92-95 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
-
-	/* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
-	/* irq_idx: 96-99 */
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
-		SDE_INTR_VIDEO_INTO_STATIC, 3},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
-		SDE_INTR_VIDEO_OUTOF_STATIC, 3},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
-		SDE_INTR_DSICMD_0_INTO_STATIC, 3},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, 3},
-	/* irq_idx: 100-103 */
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
-		SDE_INTR_DSICMD_1_INTO_STATIC, 3},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, 3},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
-		SDE_INTR_DSICMD_2_INTO_STATIC, 3},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, 3},
-	/* irq_idx: 104-107 */
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	/* irq_idx: 108-111 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	/* irq_idx: 112-115 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	/* irq_idx: 116-119 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	/* irq_idx: 120-123 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	/* irq_idx: 124-127 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
-
-	/* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
-	/* irq_idx: 128-131 */
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
-		SDE_INTR_VIDEO_INTO_STATIC, 4},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
-		SDE_INTR_VIDEO_OUTOF_STATIC, 4},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
-		SDE_INTR_DSICMD_0_INTO_STATIC, 4},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, 4},
-	/* irq_idx: 132-135 */
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
-		SDE_INTR_DSICMD_1_INTO_STATIC, 4},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, 4},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
-		SDE_INTR_DSICMD_2_INTO_STATIC, 4},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, 4},
-	/* irq_idx: 136-139 */
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	/* irq_idx: 140-143 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	/* irq_idx: 144-147 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	/* irq_idx: 148-151 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	/* irq_idx: 152-155 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	/* irq_idx: 156-159 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
-
-	/* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
-	/* irq_idx: 160-163 */
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
-		SDE_INTR_VIDEO_INTO_STATIC, 5},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
-		SDE_INTR_VIDEO_OUTOF_STATIC, 5},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
-		SDE_INTR_DSICMD_0_INTO_STATIC, 5},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, 5},
-	/* irq_idx: 164-167 */
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
-		SDE_INTR_DSICMD_1_INTO_STATIC, 5},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, 5},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
-		SDE_INTR_DSICMD_2_INTO_STATIC, 5},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, 5},
-	/* irq_idx: 168-171 */
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	/* irq_idx: 172-175 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	/* irq_idx: 176-179 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	/* irq_idx: 180-183 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	/* irq_idx: 184-187 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	/* irq_idx: 188-191 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
-
-	/* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
-	/* irq_idx: 192-195 */
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
-		SDE_INTR_VIDEO_INTO_STATIC, 6},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
-		SDE_INTR_VIDEO_OUTOF_STATIC, 6},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
-		SDE_INTR_DSICMD_0_INTO_STATIC, 6},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, 6},
-	/* irq_idx: 196-199 */
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
-		SDE_INTR_DSICMD_1_INTO_STATIC, 6},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, 6},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
-		SDE_INTR_DSICMD_2_INTO_STATIC, 6},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, 6},
-	/* irq_idx: 200-203 */
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	/* irq_idx: 204-207 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	/* irq_idx: 208-211 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	/* irq_idx: 212-215 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	/* irq_idx: 216-219 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	/* irq_idx: 220-223 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
-
-	/* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
-	/* irq_idx: 224-227 */
-	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
-		SDE_INTR_VIDEO_INTO_STATIC, 7},
-	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
-		SDE_INTR_VIDEO_OUTOF_STATIC, 7},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
-		SDE_INTR_DSICMD_0_INTO_STATIC, 7},
-	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
-		SDE_INTR_DSICMD_0_OUTOF_STATIC, 7},
-	/* irq_idx: 228-231 */
-	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
-		SDE_INTR_DSICMD_1_INTO_STATIC, 7},
-	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
-		SDE_INTR_DSICMD_1_OUTOF_STATIC, 7},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
-		SDE_INTR_DSICMD_2_INTO_STATIC, 7},
-	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
-		SDE_INTR_DSICMD_2_OUTOF_STATIC, 7},
-	/* irq_idx: 232-235 */
-	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	/* irq_idx: 236-239 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	/* irq_idx: 240-243 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	/* irq_idx: 244-247 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	/* irq_idx: 248-251 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	/* irq_idx: 252-255 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
-
-	/* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
-	/* irq_idx: 256-259 */
-	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	/* irq_idx: 260-263 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	/* irq_idx: 264-267 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	/* irq_idx: 268-271 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	/* irq_idx: 272-275 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	/* irq_idx: 276-279 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	/* irq_idx: 280-283 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	/* irq_idx: 284-287 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 8},
-
-	/* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
-	/* irq_idx: 288-291 */
-	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	/* irq_idx: 292-295 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	/* irq_idx: 296-299 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	/* irq_idx: 300-303 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	/* irq_idx: 304-307 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	/* irq_idx: 308-311 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	/* irq_idx: 312-315 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	/* irq_idx: 315-319 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 9},
-
-	/* BEGIN MAP_RANGE: 320-351 INTF_1_TEAR INTR */
-	/* irq_idx: 320-322 */
-	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_1,
-		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, 10},
-	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_1,
-		SDE_INTR_INTF_TEAR_WR_PTR, 10},
-	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1,
-		SDE_INTR_INTF_TEAR_RD_PTR, 10},
-	/* irq_idx: 323 */
-	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_1,
-		SDE_INTR_INTF_TEAR_TEAR_DETECTED, 10},
-	/* irq_idx: 324-327 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	/* irq_idx: 328-331 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	/* irq_idx: 332-335 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	/* irq_idx: 336-339 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	/* irq_idx: 340-343 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	/* irq_idx: 344-347 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	/* irq_idx: 348-351 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 10},
-
-	/* BEGIN MAP_RANGE: 352-383 INTF_2_TEAR INTR */
-	/* irq_idx: 352-354 */
-	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_2,
-		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, 11},
-	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_2,
-		SDE_INTR_INTF_TEAR_WR_PTR, 11},
-	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_2,
-		SDE_INTR_INTF_TEAR_RD_PTR, 11},
-	/* irq_idx: 355 */
-	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_2,
-		SDE_INTR_INTF_TEAR_TEAR_DETECTED, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	/* irq_idx: 356-359 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	/* irq_idx: 360-363 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	/* irq_idx: 364-367 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	/* irq_idx: 368-371 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	/* irq_idx: 372-375 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	/* irq_idx: 376-379 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	/* irq_idx: 380-383 */
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
-	{ SDE_IRQ_TYPE_RESERVED, 0, 0, 11},
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, -1},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, -1},
+	{ SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, -1},
+	{ SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, -1},
 };
 
-static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
-		u32 instance_idx)
+static struct sde_irq_type sde_irq_intr2_map[] = {
+
+	{ SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, -1},
+
+	{ SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_WR_PTR, -1},
+
+	{ SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_RD_PTR, -1},
+
+	{ SDE_IRQ_TYPE_CTL_START, CTL_0,
+		SDE_INTR_CTL_0_START, -1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_1,
+		SDE_INTR_CTL_1_START, -1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_2,
+		SDE_INTR_CTL_2_START, -1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_3,
+		SDE_INTR_CTL_3_START, -1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_4,
+		SDE_INTR_CTL_4_START, -1},
+	{ SDE_IRQ_TYPE_CTL_START, CTL_5,
+		SDE_INTR_CTL_5_START, -1},
+
+	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, -1},
+	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, -1},
+
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_TEAR_DETECTED, -1},
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_TEAR_DETECTED, -1},
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_TEAR_DETECTED, -1},
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_TEAR_DETECTED, -1},
+
+	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_4, SDE_INTR_CWB_4_OVERFLOW, -1},
+	{ SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_5, SDE_INTR_CWB_5_OVERFLOW, -1},
+
+	{ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_TEAR_DETECTED, -1},
+
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+		SDE_INTR_PING_PONG_0_TE_DETECTED, -1},
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+		SDE_INTR_PING_PONG_1_TE_DETECTED, -1},
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+		SDE_INTR_PING_PONG_2_TE_DETECTED, -1},
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+		SDE_INTR_PING_PONG_3_TE_DETECTED, -1},
+
+	{ SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+		SDE_INTR_PING_PONG_S0_TE_DETECTED, -1},
+
+	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_4,
+		SDE_INTR_PING_PONG_4_DONE, -1},
+	{ SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_5,
+		SDE_INTR_PING_PONG_5_DONE, -1},
+};
+
+static struct sde_irq_type sde_irq_hist_map[] = {
+
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+		SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, -1},
+
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+		SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, -1},
+
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+		SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+		SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, -1},
+
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+		SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, -1},
+
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+		SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, -1},
+
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+		SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, -1},
+	{ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+		SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, -1},
+};
+
+static struct sde_irq_type sde_irq_intf0_map[] = {
+
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+		SDE_INTR_VIDEO_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, -1},
+};
+
+static struct sde_irq_type sde_irq_inf1_map[] = {
+
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+		SDE_INTR_VIDEO_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, -1},
+};
+
+static struct sde_irq_type sde_irq_intf2_map[] = {
+
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+		SDE_INTR_VIDEO_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, -1},
+};
+
+static struct sde_irq_type sde_irq_intf3_map[] = {
+
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+		SDE_INTR_VIDEO_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, -1},
+};
+
+static struct sde_irq_type sde_irq_inf4_map[] = {
+
+	{ SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+		SDE_INTR_VIDEO_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+		SDE_INTR_VIDEO_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+		SDE_INTR_DSICMD_0_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+		SDE_INTR_DSICMD_0_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+		SDE_INTR_DSICMD_1_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+		SDE_INTR_DSICMD_1_OUTOF_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+		SDE_INTR_DSICMD_2_INTO_STATIC, -1},
+	{ SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+		SDE_INTR_DSICMD_2_OUTOF_STATIC, -1},
+
+	{ SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, -1},
+};
+
+static struct sde_irq_type sde_irq_ad4_0_map[] = {
+
+	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_0, SDE_INTR_BACKLIGHT_UPDATED, -1},
+};
+
+static struct sde_irq_type sde_irq_ad4_1_map[] = {
+
+	{ SDE_IRQ_TYPE_AD4_BL_DONE, DSPP_1, SDE_INTR_BACKLIGHT_UPDATED, -1},
+};
+
+static struct sde_irq_type sde_irq_intf1_te_map[] = {
+
+	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_1,
+		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
+	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_1,
+		SDE_INTR_INTF_TEAR_WR_PTR, -1},
+	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1,
+		SDE_INTR_INTF_TEAR_RD_PTR, -1},
+	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_1,
+		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
+};
+
+static struct sde_irq_type sde_irq_intf2_te_map[] = {
+
+	{ SDE_IRQ_TYPE_INTF_TEAR_AUTO_REF, INTF_2,
+		SDE_INTR_INTF_TEAR_AUTOREFRESH_DONE, -1},
+	{ SDE_IRQ_TYPE_INTF_TEAR_WR_PTR, INTF_2,
+		SDE_INTR_INTF_TEAR_WR_PTR, -1},
+	{ SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_2,
+		SDE_INTR_INTF_TEAR_RD_PTR, -1},
+
+	{ SDE_IRQ_TYPE_INTF_TEAR_TEAR_CHECK, INTF_2,
+		SDE_INTR_INTF_TEAR_TEAR_DETECTED, -1},
+};
+
+static int sde_hw_intr_irqidx_lookup(struct sde_hw_intr *intr,
+	enum sde_intr_type intr_type, u32 instance_idx)
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(sde_irq_map); i++) {
-		if (intr_type == sde_irq_map[i].intr_type &&
-			instance_idx == sde_irq_map[i].instance_idx)
+	for (i = 0; i < intr->sde_irq_map_size; i++) {
+		if (intr_type == intr->sde_irq_map[i].intr_type &&
+			instance_idx == intr->sde_irq_map[i].instance_idx)
 			return i;
 	}
 
@@ -927,6 +570,7 @@
 	int end_idx;
 	u32 irq_status;
 	unsigned long irq_flags;
+	int sde_irq_idx;
 
 	if (!intr)
 		return;
@@ -937,18 +581,23 @@
 	 * irq lookup index.
 	 */
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-	for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
+	for (reg_idx = 0; reg_idx < intr->sde_irq_size; reg_idx++) {
 		irq_status = intr->save_irq_status[reg_idx];
 
-		/*
-		 * Each Interrupt register has a range of 32 indexes, and
-		 * that is static for sde_irq_map.
-		 */
-		start_idx = reg_idx * 32;
-		end_idx = start_idx + 32;
+		/* get the global offset in 'sde_irq_map' */
+		sde_irq_idx = intr->sde_irq_tbl[reg_idx].sde_irq_idx;
+		if (sde_irq_idx < 0)
+			continue;
 
-		if (start_idx >= ARRAY_SIZE(sde_irq_map) ||
-				end_idx > ARRAY_SIZE(sde_irq_map))
+		/*
+		 * Each Interrupt register has dynamic range of indexes,
+		 * initialized during hw_intr_init when sde_irq_tbl is created.
+		 */
+		start_idx = intr->sde_irq_tbl[reg_idx].map_idx_start;
+		end_idx = intr->sde_irq_tbl[reg_idx].map_idx_end;
+
+		if (start_idx >= intr->sde_irq_map_size ||
+				end_idx > intr->sde_irq_map_size)
 			continue;
 
 		/*
@@ -959,8 +608,10 @@
 		for (irq_idx = start_idx;
 				(irq_idx < end_idx) && irq_status;
 				irq_idx++)
-			if ((irq_status & sde_irq_map[irq_idx].irq_mask) &&
-				(sde_irq_map[irq_idx].reg_idx == reg_idx)) {
+			if ((irq_status &
+				intr->sde_irq_map[irq_idx].irq_mask) &&
+				(intr->sde_irq_map[irq_idx].reg_idx ==
+				 reg_idx)) {
 				/*
 				 * Once a match on irq mask, perform a callback
 				 * to the given cbfunc. cbfunc will take care
@@ -979,7 +630,8 @@
 				 * with the matching mask. Once irq_status
 				 * is all cleared, the search can be stopped.
 				 */
-				irq_status &= ~sde_irq_map[irq_idx].irq_mask;
+				irq_status &=
+					~intr->sde_irq_map[irq_idx].irq_mask;
 			}
 	}
 	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
@@ -997,14 +649,19 @@
 	if (!intr)
 		return -EINVAL;
 
-	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
 
-	irq = &sde_irq_map[irq_idx];
+	irq = &intr->sde_irq_map[irq_idx];
 	reg_idx = irq->reg_idx;
-	reg = &sde_intr_set[reg_idx];
+	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
+		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
+		return -EINVAL;
+	}
+
+	reg = &intr->sde_irq_tbl[reg_idx];
 
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 	cache_irq_mask = intr->cache_irq_mask[reg_idx];
@@ -1043,14 +700,19 @@
 	if (!intr)
 		return -EINVAL;
 
-	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
 
-	irq = &sde_irq_map[irq_idx];
+	irq = &intr->sde_irq_map[irq_idx];
 	reg_idx = irq->reg_idx;
-	reg = &sde_intr_set[reg_idx];
+	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
+		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
+		return -EINVAL;
+	}
+
+	reg = &intr->sde_irq_tbl[reg_idx];
 
 	cache_irq_mask = intr->cache_irq_mask[reg_idx];
 	if ((cache_irq_mask & irq->irq_mask) == 0) {
@@ -1083,7 +745,7 @@
 	if (!intr)
 		return -EINVAL;
 
-	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+	if (irq_idx < 0 || irq_idx >= intr->sde_irq_map_size) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return -EINVAL;
 	}
@@ -1102,8 +764,9 @@
 	if (!intr)
 		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+	for (i = 0; i < intr->sde_irq_size; i++)
+		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
+				0xffffffff);
 
 	/* ensure register writes go through */
 	wmb();
@@ -1118,8 +781,9 @@
 	if (!intr)
 		return -EINVAL;
 
-	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+	for (i = 0; i < intr->sde_irq_size; i++)
+		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].en_off,
+				0x00000000);
 
 	/* ensure register writes go through */
 	wmb();
@@ -1160,17 +824,18 @@
 		return;
 
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
-	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
+	for (i = 0; i < intr->sde_irq_size; i++) {
 		/* Read interrupt status */
 		intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
-				sde_intr_set[i].status_off);
+				intr->sde_irq_tbl[i].status_off);
 
 		/* Read enable mask */
-		enable_mask = SDE_REG_READ(&intr->hw, sde_intr_set[i].en_off);
+		enable_mask = SDE_REG_READ(&intr->hw,
+				intr->sde_irq_tbl[i].en_off);
 
 		/* and clear the interrupt */
 		if (intr->save_irq_status[i])
-			SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off,
+			SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[i].clr_off,
 					intr->save_irq_status[i]);
 
 		/* Finally update IRQ status based on enable mask */
@@ -1191,13 +856,18 @@
 	if (!intr)
 		return;
 
-	if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return;
 	}
 
-	reg_idx = sde_irq_map[irq_idx].reg_idx;
-	SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
+	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
+		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
+		return;
+	}
+
+	SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
 			irq_mask);
 
 	/* ensure register writes go through */
@@ -1212,9 +882,19 @@
 	if (!intr)
 		return;
 
-	reg_idx = sde_irq_map[irq_idx].reg_idx;
-	SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
-			sde_irq_map[irq_idx].irq_mask);
+	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return;
+	}
+
+	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
+	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
+		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
+		return;
+	}
+
+	SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
+			intr->sde_irq_map[irq_idx].irq_mask);
 
 	/* ensure register writes go through */
 	wmb();
@@ -1242,17 +922,22 @@
 	if (!intr)
 		return 0;
 
-	if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return 0;
 	}
 
-	reg_idx = sde_irq_map[irq_idx].reg_idx;
+	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
+	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
+		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
+		return 0;
+	}
+
 	intr_status = SDE_REG_READ(&intr->hw,
-			sde_intr_set[reg_idx].status_off) &
-					sde_irq_map[irq_idx].irq_mask;
+			intr->sde_irq_tbl[reg_idx].status_off) &
+					intr->sde_irq_map[irq_idx].irq_mask;
 	if (intr_status && clear)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
 				intr_status);
 
 	/* ensure register writes go through */
@@ -1271,19 +956,24 @@
 	if (!intr)
 		return 0;
 
-	if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return 0;
 	}
 
+	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
+	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
+		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
+		return 0;
+	}
+
 	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 
-	reg_idx = sde_irq_map[irq_idx].reg_idx;
 	intr_status = SDE_REG_READ(&intr->hw,
-			sde_intr_set[reg_idx].status_off) &
-					sde_irq_map[irq_idx].irq_mask;
+			intr->sde_irq_tbl[reg_idx].status_off) &
+					intr->sde_irq_map[irq_idx].irq_mask;
 	if (intr_status && clear)
-		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+		SDE_REG_WRITE(&intr->hw, intr->sde_irq_tbl[reg_idx].clr_off,
 				intr_status);
 
 	/* ensure register writes go through */
@@ -1299,21 +989,25 @@
 {
 	int reg_idx;
 	unsigned long irq_flags;
-	u32 intr_status;
+	u32 intr_status = 0;
 
 	if (!intr)
 		return 0;
 
-	if (irq_idx >= ARRAY_SIZE(sde_irq_map) || irq_idx < 0) {
+	if (irq_idx >= intr->sde_irq_map_size || irq_idx < 0) {
 		pr_err("invalid IRQ index: [%d]\n", irq_idx);
 		return 0;
 	}
 
-	spin_lock_irqsave(&intr->irq_lock, irq_flags);
+	reg_idx = intr->sde_irq_map[irq_idx].reg_idx;
+	if (reg_idx < 0 || reg_idx >= intr->sde_irq_size) {
+		pr_err("invalid irq reg:%d irq:%d\n", reg_idx, irq_idx);
+		return 0;
+	}
 
-	reg_idx = sde_irq_map[irq_idx].reg_idx;
+	spin_lock_irqsave(&intr->irq_lock, irq_flags);
 	intr_status = SDE_REG_READ(&intr->hw,
-			sde_intr_set[reg_idx].status_off);
+			intr->sde_irq_tbl[reg_idx].status_off);
 	spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
 
 	return intr_status;
@@ -1353,54 +1047,423 @@
 	return &m->mdss[0];
 }
 
-struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
-		struct sde_mdss_cfg *m)
+static inline int _sde_hw_intr_init_sde_irq_tbl(u32 irq_tbl_size,
+	struct sde_intr_reg *sde_irq_tbl)
 {
-	struct sde_hw_intr *intr;
-	struct sde_mdss_base_cfg *cfg;
+	int idx;
+	struct sde_intr_reg *sde_irq;
 
-	if (!addr || !m)
-		return ERR_PTR(-EINVAL);
+	for (idx = 0; idx < irq_tbl_size; idx++) {
+		sde_irq = &sde_irq_tbl[idx];
 
-	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
-	if (!intr)
-		return ERR_PTR(-ENOMEM);
+		switch (sde_irq->sde_irq_idx) {
+		case MDSS_INTR_SSPP_TOP0_INTR:
+			sde_irq->clr_off =
+				MDP_SSPP_TOP0_OFF+INTR_CLEAR;
+			sde_irq->en_off =
+				MDP_SSPP_TOP0_OFF+INTR_EN;
+			sde_irq->status_off =
+				MDP_SSPP_TOP0_OFF+INTR_STATUS;
+			break;
+		case MDSS_INTR_SSPP_TOP0_INTR2:
+			sde_irq->clr_off =
+				MDP_SSPP_TOP0_OFF+INTR2_CLEAR;
+			sde_irq->en_off =
+				MDP_SSPP_TOP0_OFF+INTR2_EN;
+			sde_irq->status_off =
+				MDP_SSPP_TOP0_OFF+INTR2_STATUS;
+			break;
+		case MDSS_INTR_SSPP_TOP0_HIST_INTR:
+			sde_irq->clr_off =
+				MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR;
+			sde_irq->en_off =
+				MDP_SSPP_TOP0_OFF+HIST_INTR_EN;
+			sde_irq->status_off =
+				MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS;
+			break;
+		case MDSS_INTR_INTF_0_INTR:
+			sde_irq->clr_off =
+				MDP_INTF_0_OFF+INTF_INTR_CLEAR;
+			sde_irq->en_off =
+				MDP_INTF_0_OFF+INTF_INTR_EN;
+			sde_irq->status_off =
+				MDP_INTF_0_OFF+INTF_INTR_STATUS;
+			break;
+		case MDSS_INTR_INTF_1_INTR:
+			sde_irq->clr_off =
+				MDP_INTF_1_OFF+INTF_INTR_CLEAR;
+			sde_irq->en_off =
+				MDP_INTF_1_OFF+INTF_INTR_EN;
+			sde_irq->status_off =
+				MDP_INTF_1_OFF+INTF_INTR_STATUS;
+			break;
+		case MDSS_INTR_INTF_2_INTR:
+			sde_irq->clr_off =
+				MDP_INTF_2_OFF+INTF_INTR_CLEAR;
+			sde_irq->en_off =
+				MDP_INTF_2_OFF+INTF_INTR_EN;
+			sde_irq->status_off =
+				MDP_INTF_2_OFF+INTF_INTR_STATUS;
+			break;
+		case MDSS_INTR_INTF_3_INTR:
+			sde_irq->clr_off =
+				MDP_INTF_3_OFF+INTF_INTR_CLEAR;
+			sde_irq->en_off =
+				MDP_INTF_3_OFF+INTF_INTR_EN;
+			sde_irq->status_off =
+				MDP_INTF_3_OFF+INTF_INTR_STATUS;
+			break;
+		case MDSS_INTR_INTF_4_INTR:
+			sde_irq->clr_off =
+				MDP_INTF_4_OFF+INTF_INTR_CLEAR;
+			sde_irq->en_off =
+				MDP_INTF_4_OFF+INTF_INTR_EN;
+			sde_irq->status_off =
+				MDP_INTF_4_OFF+INTF_INTR_STATUS;
+			break;
+		case MDSS_INTR_AD4_0_INTR:
+			sde_irq->clr_off =
+				MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF;
+			sde_irq->en_off =
+				MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF;
+			sde_irq->status_off =
+				MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF;
+			break;
+		case MDSS_INTR_AD4_1_INTR:
+			sde_irq->clr_off =
+				MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF;
+			sde_irq->en_off =
+				MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF;
+			sde_irq->status_off =
+				MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF;
+			break;
+		case MDSS_INTF_TEAR_1_INTR:
+			sde_irq->clr_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
+				MDP_INTF_TEAR_INTR_CLEAR_OFF;
+			sde_irq->en_off =
+				MDP_INTF_TEAR_INTF_1_IRQ_OFF +
+				MDP_INTF_TEAR_INTR_EN_OFF;
+			sde_irq->status_off = MDP_INTF_TEAR_INTF_1_IRQ_OFF +
+				MDP_INTF_TEAR_INTR_STATUS_OFF;
+			break;
+		case MDSS_INTF_TEAR_2_INTR:
+			sde_irq->clr_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
+				MDP_INTF_TEAR_INTR_CLEAR_OFF;
+			sde_irq->en_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
+				MDP_INTF_TEAR_INTR_EN_OFF;
+			sde_irq->status_off = MDP_INTF_TEAR_INTF_2_IRQ_OFF +
+				MDP_INTF_TEAR_INTR_STATUS_OFF;
+			break;
+		default:
+			pr_err("wrong irq idx %d\n",
+				sde_irq->sde_irq_idx);
+			return -EINVAL;
+		}
 
-	cfg = __intr_offset(m, addr, &intr->hw);
-	if (!cfg) {
-		kfree(intr);
-		return ERR_PTR(-EINVAL);
-	}
-	__setup_intr_ops(&intr->ops);
-
-	intr->irq_idx_tbl_size = ARRAY_SIZE(sde_irq_map);
-
-	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
-			GFP_KERNEL);
-	if (intr->cache_irq_mask == NULL) {
-		kfree(intr);
-		return ERR_PTR(-ENOMEM);
+		pr_debug("idx:%d irq_idx:%d clr:0x%x en:0x%x status:0x%x\n",
+			idx, sde_irq->sde_irq_idx, sde_irq->clr_off,
+			sde_irq->en_off, sde_irq->status_off);
 	}
 
-	intr->save_irq_status = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
-			GFP_KERNEL);
-	if (intr->save_irq_status == NULL) {
-		kfree(intr->cache_irq_mask);
-		kfree(intr);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	spin_lock_init(&intr->irq_lock);
-
-	return intr;
+	return 0;
 }
 
 void sde_hw_intr_destroy(struct sde_hw_intr *intr)
 {
 	if (intr) {
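+		/* kfree(NULL) is a no-op, so cleanup is safe after partial init */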
+		kfree(intr->sde_irq_tbl);
+		kfree(intr->sde_irq_map);
 		kfree(intr->cache_irq_mask);
 		kfree(intr->save_irq_status);
 		kfree(intr);
 	}
 }
 
+static inline u32 _get_irq_map_size(int idx)
+{
+	u32 ret = 0;
+
+	switch (idx) {
+	case MDSS_INTR_SSPP_TOP0_INTR:
+		ret = ARRAY_SIZE(sde_irq_intr_map);
+		break;
+	case MDSS_INTR_SSPP_TOP0_INTR2:
+		ret = ARRAY_SIZE(sde_irq_intr2_map);
+		break;
+	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
+		ret = ARRAY_SIZE(sde_irq_hist_map);
+		break;
+	case MDSS_INTR_INTF_0_INTR:
+		ret = ARRAY_SIZE(sde_irq_intf0_map);
+		break;
+	case MDSS_INTR_INTF_1_INTR:
+		ret = ARRAY_SIZE(sde_irq_inf1_map);
+		break;
+	case MDSS_INTR_INTF_2_INTR:
+		ret = ARRAY_SIZE(sde_irq_intf2_map);
+		break;
+	case MDSS_INTR_INTF_3_INTR:
+		ret = ARRAY_SIZE(sde_irq_intf3_map);
+		break;
+	case MDSS_INTR_INTF_4_INTR:
+		ret = ARRAY_SIZE(sde_irq_inf4_map);
+		break;
+	case MDSS_INTR_AD4_0_INTR:
+		ret = ARRAY_SIZE(sde_irq_ad4_0_map);
+		break;
+	case MDSS_INTR_AD4_1_INTR:
+		ret = ARRAY_SIZE(sde_irq_ad4_1_map);
+		break;
+	case MDSS_INTF_TEAR_1_INTR:
+		ret = ARRAY_SIZE(sde_irq_intf1_te_map);
+		break;
+	case MDSS_INTF_TEAR_2_INTR:
+		ret = ARRAY_SIZE(sde_irq_intf2_te_map);
+		break;
+	default:
+		pr_err("invalid idx:%d\n");
+	}
+
+	return ret;
+}
+
+static inline struct sde_irq_type *_get_irq_map_addr(int idx)
+{
+	struct sde_irq_type *ret = NULL;
+
+	switch (idx) {
+	case MDSS_INTR_SSPP_TOP0_INTR:
+		ret = sde_irq_intr_map;
+		break;
+	case MDSS_INTR_SSPP_TOP0_INTR2:
+		ret = sde_irq_intr2_map;
+		break;
+	case MDSS_INTR_SSPP_TOP0_HIST_INTR:
+		ret = sde_irq_hist_map;
+		break;
+	case MDSS_INTR_INTF_0_INTR:
+		ret = sde_irq_intf0_map;
+		break;
+	case MDSS_INTR_INTF_1_INTR:
+		ret = sde_irq_inf1_map;
+		break;
+	case MDSS_INTR_INTF_2_INTR:
+		ret = sde_irq_intf2_map;
+		break;
+	case MDSS_INTR_INTF_3_INTR:
+		ret = sde_irq_intf3_map;
+		break;
+	case MDSS_INTR_INTF_4_INTR:
+		ret = sde_irq_inf4_map;
+		break;
+	case MDSS_INTR_AD4_0_INTR:
+		ret = sde_irq_ad4_0_map;
+		break;
+	case MDSS_INTR_AD4_1_INTR:
+		ret = sde_irq_ad4_1_map;
+		break;
+	case MDSS_INTF_TEAR_1_INTR:
+		ret = sde_irq_intf1_te_map;
+		break;
+	case MDSS_INTF_TEAR_2_INTR:
+		ret = sde_irq_intf2_te_map;
+		break;
+	default:
+		pr_err("invalid idx:%d\n");
+	}
+
+	return ret;
+}
+
+static int _sde_copy_regs(struct sde_irq_type *sde_irq_map, u32 size,
+	u32 irq_idx, u32 low_idx, u32 high_idx)
+{
+	int i, j = 0;
+	struct sde_irq_type *src = _get_irq_map_addr(irq_idx);
+	u32 src_size = _get_irq_map_size(irq_idx);
+
+	if (!src)
+		return -EINVAL;
+
+	if (low_idx >= size || high_idx > size ||
+		(high_idx - low_idx > src_size)) {
+		pr_err("invalid size l:%d h:%d dst:%d src:%d\n",
+			low_idx, high_idx, size, src_size);
+		return -EINVAL;
+	}
+
+	for (i = low_idx; i < high_idx; i++)
+		sde_irq_map[i] = src[j++];
+
+	return 0;
+}
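+
+/*
+ * Example (illustrative indexes): with low_idx = 8 and high_idx = 12, the
+ * four entries of the source map selected by irq_idx are copied into
+ * sde_irq_map[8..11]; windows that overrun either table make
+ * _sde_copy_regs() return -EINVAL.
+ */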
+
+static int _sde_hw_intr_init_irq_tables(struct sde_hw_intr *intr,
+	struct sde_mdss_cfg *m)
+{
+	int i, idx, sde_irq_tbl_idx = 0, ret = 0;
+	u32 low_idx, high_idx;
+	u32 sde_irq_map_idx = 0;
+
+	/* Initialize the offsets of the irqs in the sde_irq_map table */
+	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
+		if (test_bit(idx, m->mdss_irqs)) {
+			low_idx = sde_irq_map_idx;
+			high_idx = low_idx + _get_irq_map_size(idx);
+
+			pr_debug("init[%d]=%d low:%d high:%d\n",
+				sde_irq_tbl_idx, idx, low_idx, high_idx);
+
+			if (sde_irq_tbl_idx >= intr->sde_irq_size ||
+				sde_irq_tbl_idx < 0) {
+				ret = -EINVAL;
+				goto exit;
+			}
+
+			/* init sde_irq_map with the global irq mapping table */
+			if (_sde_copy_regs(intr->sde_irq_map,
+					intr->sde_irq_map_size,
+					idx, low_idx, high_idx)) {
+				ret = -EINVAL;
+				goto exit;
+			}
+
+			/* init irq map with its reg idx within the irq tbl */
+			for (i = low_idx; i < high_idx; i++) {
+				intr->sde_irq_map[i].reg_idx = sde_irq_tbl_idx;
+				pr_debug("sde_irq_map[%d].reg_idx=%d\n",
+						i, sde_irq_tbl_idx);
+			}
+
+			/*
+			 * Track the range of this irq's entries within
+			 * sde_irq_map, so that irq dispatch only walks the
+			 * indexes belonging to this irq.
+			 */
+			intr->sde_irq_tbl[sde_irq_tbl_idx].sde_irq_idx = idx;
+			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_start =
+				low_idx;
+			intr->sde_irq_tbl[sde_irq_tbl_idx].map_idx_end =
+				high_idx;
+
+			/* increment idx for both tables accordingly */
+			sde_irq_tbl_idx++;
+			sde_irq_map_idx = high_idx;
+		}
+	}
+
+	/* do this after 'sde_irq_idx' is initialized in sde_irq_tbl */
+	ret = _sde_hw_intr_init_sde_irq_tbl(intr->sde_irq_size,
+			intr->sde_irq_tbl);
+
+exit:
+	return ret;
+}
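+
+/*
+ * Illustration (hypothetical first map size): if a target enables only
+ * MDSS_INTR_SSPP_TOP0_INTR (say 30 map entries) and MDSS_INTF_TEAR_1_INTR
+ * (4 entries), sde_irq_tbl ends up with two rows covering sde_irq_map
+ * ranges [0, 30) and [30, 34), and each copied map entry records its
+ * owning row in reg_idx.
+ */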
+
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_intr *intr = NULL;
+	struct sde_mdss_base_cfg *cfg;
+	u32 irq_regs_count = 0;
+	u32 irq_map_count = 0;
+	u32 size;
+	int idx;
+	int ret = 0;
+
+	if (!addr || !m) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+	if (!intr) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	cfg = __intr_offset(m, addr, &intr->hw);
+	if (!cfg) {
+		ret = -EINVAL;
+		goto exit;
+	}
+	__setup_intr_ops(&intr->ops);
+
+	if (MDSS_INTR_MAX >= UINT_MAX) {
+		pr_err("max intr exceeded:%d\n", MDSS_INTR_MAX);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* check how many irqs this target supports */
+	for (idx = 0; idx < MDSS_INTR_MAX; idx++) {
+		if (test_bit(idx, m->mdss_irqs)) {
+			irq_regs_count++;
+
+			size = _get_irq_map_size(idx);
+			if (!size || irq_map_count >= UINT_MAX - size) {
+				pr_err("wrong map cnt idx:%d sz:%d cnt:%d\n",
+					idx, size, irq_map_count);
+				ret = -EINVAL;
+				goto exit;
+			}
+
+			irq_map_count += size;
+		}
+	}
+
+	if (irq_regs_count == 0 || irq_regs_count > MDSS_INTR_MAX ||
+		irq_map_count == 0) {
+		pr_err("wrong mapping of supported irqs 0x%x\n",
+			m->mdss_irqs[0]);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Allocate table for the irq registers */
+	intr->sde_irq_size = irq_regs_count;
+	intr->sde_irq_tbl = kcalloc(irq_regs_count, sizeof(*intr->sde_irq_tbl),
+		GFP_KERNEL);
+	if (intr->sde_irq_tbl == NULL) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* Allocate table with the valid interrupts bits */
+	intr->sde_irq_map_size = irq_map_count;
+	intr->sde_irq_map = kcalloc(irq_map_count, sizeof(*intr->sde_irq_map),
+		GFP_KERNEL);
+	if (intr->sde_irq_map == NULL) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* Initialize IRQs tables */
+	ret = _sde_hw_intr_init_irq_tables(intr, m);
+	if (ret)
+		goto exit;
+
+	intr->cache_irq_mask = kcalloc(intr->sde_irq_size,
+			sizeof(*intr->cache_irq_mask), GFP_KERNEL);
+	if (intr->cache_irq_mask == NULL) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	intr->save_irq_status = kcalloc(intr->sde_irq_size,
+			sizeof(*intr->save_irq_status), GFP_KERNEL);
+	if (intr->save_irq_status == NULL) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	spin_lock_init(&intr->irq_lock);
+
+exit:
+	if (ret) {
+		sde_hw_intr_destroy(intr);
+		return ERR_PTR(ret);
+	}
+
+	return intr;
+}
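+
+/*
+ * Typical usage (illustrative):
+ *
+ *	intr = sde_hw_intr_init(mmio, catalog);
+ *	if (!IS_ERR(intr))
+ *		irq_idx = intr->ops.irq_idx_lookup(intr,
+ *				SDE_IRQ_TYPE_INTF_TEAR_RD_PTR, INTF_1);
+ */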
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
index ef2fe77..0a552b4 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -113,11 +113,13 @@
 	/**
 	 * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
 	 *                 Used for all irq related ops
+	 * @intr:	HW interrupt handle
 	 * @intr_type:		Interrupt type defined in sde_intr_type
 	 * @instance_idx:	HW interrupt block instance
 	 * @return:		irq_idx or -EINVAL for lookup fail
 	 */
 	int (*irq_idx_lookup)(
+			struct sde_hw_intr *intr,
 			enum sde_intr_type intr_type,
 			u32 instance_idx);
 
@@ -286,15 +288,22 @@
  * @ops:              function pointer mapping for IRQ handling
  * @cache_irq_mask:   array of IRQ enable masks reg storage created during init
  * @save_irq_status:  array of IRQ status reg storage created during init
- * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
  * @irq_lock:         spinlock for accessing IRQ resources
+ * @sde_irq_size:     total number of elements of the sde_irq_tbl
+ * @sde_irq_tbl:      table with the register offsets of the sde interrupts
+ *		      supported by the hw
+ * @sde_irq_map_size: total number of elements of the 'sde_irq_map'
+ * @sde_irq_map:      table of the valid interrupt bits within the irq regs
  */
 struct sde_hw_intr {
 	struct sde_hw_blk_reg_map hw;
 	struct sde_hw_intr_ops ops;
 	u32 *cache_irq_mask;
 	u32 *save_irq_status;
-	u32 irq_idx_tbl_size;
+	u32 sde_irq_size;
+	struct sde_intr_reg *sde_irq_tbl;
+	u32 sde_irq_map_size;
+	struct sde_irq_type *sde_irq_map;
 	spinlock_t irq_lock;
 };
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
index 0ff08a3..751a963 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_lm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -258,7 +258,8 @@
 	if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion) ||
 			IS_SM8150_TARGET(m->hwversion) ||
 			IS_SDMSHRIKE_TARGET(m->hwversion) ||
-			IS_SM6150_TARGET(m->hwversion))
+			IS_SM6150_TARGET(m->hwversion) ||
+			IS_KONA_TARGET(m->hwversion))
 		ops->setup_blend_config = sde_hw_lm_setup_blend_config_sdm845;
 	else
 		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index e0ae41f..89622b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
@@ -726,7 +725,7 @@
 	int i, ops = 0, ret = 0;
 	bool old_valid_fb = false;
 
-	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
 		if (!crtc->state || !crtc->state->active)
 			continue;
 		/*
@@ -741,7 +740,7 @@
 		 * 1. Check if old state on the CRTC has planes
 		 * staged with valid fbs
 		 */
-		for_each_plane_in_state(state, plane, plane_state, i) {
+		for_each_old_plane_in_state(state, plane, plane_state, i) {
 			if (!plane_state->crtc)
 				continue;
 			if (plane_state->fb) {
@@ -1000,7 +999,7 @@
 		sde_kms->first_kickoff = false;
 	}
 
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list,
 				head) {
 			if (encoder->crtc != crtc)
@@ -1038,7 +1037,7 @@
 	}
 
 	SDE_ATRACE_BEGIN("sde_kms_commit");
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 		if (crtc->state->active) {
 			SDE_EVT32(DRMID(crtc));
 			sde_crtc_commit_kickoff(crtc, old_crtc_state);
@@ -1154,7 +1153,7 @@
 
 	SDE_ATRACE_BEGIN("sde_kms_complete_commit");
 
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 		sde_crtc_complete_commit(crtc, old_crtc_state);
 
 		/* complete secure transitions if any */
@@ -1162,7 +1161,8 @@
 			_sde_kms_secure_ctrl(sde_kms, crtc, true);
 	}
 
-	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+	for_each_old_connector_in_state(old_state, connector,
+			old_conn_state, i) {
 		struct sde_connector *c_conn;
 
 		c_conn = to_sde_connector(connector);
@@ -1177,7 +1177,7 @@
 
 	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
 
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
 		_sde_kms_release_splash_resource(sde_kms, crtc);
 
 	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
@@ -1254,7 +1254,7 @@
 	}
 
 	/* old_state actually contains updated crtc pointers */
-	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
 		if (crtc->state->active)
 			sde_crtc_prepare_commit(crtc, old_crtc_state);
 	}
@@ -2060,8 +2060,6 @@
 	}
 
 end:
-	drm_atomic_clean_old_fb(dev, plane_mask, ret);
-
 	return ret;
 }
 
@@ -2253,7 +2251,7 @@
 	dev = sde_kms->dev;
 
 	/* iterate state object for active secure/non-secure crtc */
-	for_each_crtc_in_state(state, crtc, crtc_state, i) {
+	for_each_old_crtc_in_state(state, crtc, crtc_state, i) {
 		if (!crtc_state->active)
 			continue;
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 46a3758..e01a72f 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 7da7e18..6a095c9 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1,4 +1,3 @@
-// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
@@ -2193,7 +2192,7 @@
 		SDE_DEBUG("cleared fb_id\n");
 		rstate->out_fb = NULL;
 	} else if (!rstate->out_fb) {
-		fb = drm_framebuffer_lookup(plane->dev, fb_id);
+		fb = drm_framebuffer_lookup(plane->dev, NULL, fb_id);
 		if (fb) {
 			SDE_DEBUG("plane%d.%d get fb:%d\n", plane->base.id,
 					rstate->sequence_id, fb_id);
@@ -4882,7 +4881,7 @@
 		msm_property_destroy(&psde->property_info);
 		mutex_destroy(&psde->lock);
 
-		drm_plane_helper_disable(plane);
+		drm_plane_helper_disable(plane, NULL);
 
 		/* this will destroy the states as well */
 		drm_plane_cleanup(plane);
@@ -5359,7 +5358,7 @@
 	psde->pipe = pipe;
 	psde->is_virtual = (master_plane_id != 0);
 	INIT_LIST_HEAD(&psde->mplane_list);
-	master_plane = drm_plane_find(dev, master_plane_id);
+	master_plane = drm_plane_find(dev, NULL, master_plane_id);
 	if (master_plane) {
 		struct sde_plane *mpsde = to_sde_plane(master_plane);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
index dc787a7..33f8903 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.h
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  * Copyright (C) 2013 Red Hat
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
index 53d7280..406289e 100644
--- a/drivers/gpu/drm/msm/sde/sde_trace.h
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -173,7 +173,7 @@
 
 #define SDE_TRACE_EVTLOG_SIZE	15
 TRACE_EVENT(sde_evtlog,
-	TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 data[]),
+	TP_PROTO(const char *tag, u32 tag_id, u32 cnt, u32 *data),
 	TP_ARGS(tag, tag_id, cnt, data),
 	TP_STRUCT__entry(
 			__field(int, pid)
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
index 7d2fde4..525252e 100644
--- a/drivers/gpu/drm/msm/sde/sde_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -84,7 +84,7 @@
 				SDE_ERROR("failed to create mode\n");
 				break;
 			}
-			ret = drm_mode_convert_umode(mode,
+			ret = drm_mode_convert_umode(wb_dev->drm_dev, mode,
 					&wb_dev->modes[i]);
 			if (ret) {
 				SDE_ERROR("failed to convert mode %d\n", ret);
@@ -193,8 +193,8 @@
 				struct drm_display_mode dispmode;
 
 				memset(&dispmode, 0, sizeof(dispmode));
-				ret = drm_mode_convert_umode(&dispmode,
-						&modeinfo[i]);
+				ret = drm_mode_convert_umode(wb_dev->drm_dev,
+						&dispmode, &modeinfo[i]);
 				if (ret) {
 					SDE_ERROR(
 						"failed to convert mode %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x status:%d rc:%d\n",
@@ -537,7 +537,7 @@
 
 	priv = drm_dev->dev_private;
 
-	connector = drm_connector_lookup(drm_dev, connector_id);
+	connector = drm_connector_lookup(drm_dev, file_priv, connector_id);
 	if (!connector) {
 		SDE_ERROR("failed to find connector\n");
 		rc = -ENOENT;
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 7c3aebe..a924de9 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -67,6 +67,11 @@
 #define DBG_CTRL_RESET_HW_PANIC	BIT(2)
 #define DBG_CTRL_MAX			BIT(3)
 
+#define DUMP_BUF_SIZE			(4096 * 512)
+#define DUMP_CLMN_COUNT			4
+#define DUMP_LINE_SIZE			256
+#define DUMP_MAX_LINES_PER_BLK		512
+
 /**
  * struct sde_dbg_reg_offset - tracking for start and end of region
  * @start: start offset
@@ -162,6 +167,29 @@
 	struct vbif_debug_bus_entry *entries;
 };
 
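+/**
+ * struct sde_dbg_dsi_debug_bus - descriptor for the dsi debug bus
+ * @entries: array of debug bus control values
+ * @size: number of elements in @entries
+ */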
+struct sde_dbg_dsi_debug_bus {
+	u32 *entries;
+	u32 size;
+};
+
+/**
+ * struct sde_dbg_regbuf - wraps buffer and tracking params for register dumps
+ * @buf: pointer to allocated memory for storing register dumps in hw recovery
+ * @buf_size: size of the memory allocated
+ * @len: size of the dump data valid in the buffer
+ * @rpos: cursor points to the buffer position read by client
+ * @dump_done: to indicate if dumping to user memory is complete
+ * @cur_blk: points to the current sde_dbg_reg_base block
+ */
+struct sde_dbg_regbuf {
+	char *buf;
+	int buf_size;
+	int len;
+	int rpos;
+	int dump_done;
+	struct sde_dbg_reg_base *cur_blk;
+};
+
 /**
  * struct sde_dbg_base - global sde debug base structure
  * @evtlog: event log instance
@@ -178,6 +206,10 @@
  * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
  * @dump_all: dump all entries in register dump
  * @dsi_dbg_bus: dump dsi debug bus register
+ * @regbuf: buffer data to track the register dumping in hw recovery
+ * @cur_evt_index: index used for tracking event logs dump in hw recovery
+ * @dbgbus_dump_idx: index used for tracking dbg-bus dump in hw recovery
+ * @vbif_dbgbus_dump_idx: index for tracking vbif dumps in hw recovery
  */
 static struct sde_dbg_base {
 	struct sde_dbg_evtlog *evtlog;
@@ -195,9 +227,15 @@
 
 	struct sde_dbg_sde_debug_bus dbgbus_sde;
 	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
+	struct sde_dbg_dsi_debug_bus dbgbus_dsi;
 	bool dump_all;
 	bool dsi_dbg_bus;
 	u32 debugfs_ctrl;
+
+	struct sde_dbg_regbuf regbuf;
+	u32 cur_evt_index;
+	u32 dbgbus_dump_idx;
+	u32 vbif_dbgbus_dump_idx;
 } sde_dbg_base;
 
 /* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
@@ -3292,6 +3330,42 @@
 	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
 };
 
+static u32 dsi_dbg_bus_sdm845[] = {
+	0x0001, 0x1001, 0x0001, 0x0011,
+	0x1021, 0x0021, 0x0031, 0x0041,
+	0x0051, 0x0061, 0x3061, 0x0061,
+	0x2061, 0x2061, 0x1061, 0x1061,
+	0x1061, 0x0071, 0x0071, 0x0071,
+	0x0081, 0x0081, 0x00A1, 0x00A1,
+	0x10A1, 0x20A1, 0x30A1, 0x10A1,
+	0x10A1, 0x30A1, 0x20A1, 0x00B1,
+	0x00C1, 0x00C1, 0x10C1, 0x20C1,
+	0x30C1, 0x00D1, 0x00D1, 0x20D1,
+	0x30D1, 0x00E1, 0x00E1, 0x00E1,
+	0x00F1, 0x00F1, 0x0101, 0x0101,
+	0x1101, 0x2101, 0x3101, 0x0111,
+	0x0141, 0x1141, 0x0141, 0x1141,
+	0x1141, 0x0151, 0x0151, 0x1151,
+	0x2151, 0x3151, 0x0161, 0x0161,
+	0x1161, 0x0171, 0x0171, 0x0181,
+	0x0181, 0x0191, 0x0191, 0x01A1,
+	0x01A1, 0x01B1, 0x01B1, 0x11B1,
+	0x21B1, 0x01C1, 0x01C1, 0x11C1,
+	0x21C1, 0x31C1, 0x01D1, 0x01D1,
+	0x01D1, 0x01D1, 0x11D1, 0x21D1,
+	0x21D1, 0x01E1, 0x01E1, 0x01F1,
+	0x01F1, 0x0201, 0x0201, 0x0211,
+	0x0221, 0x0231, 0x0241, 0x0251,
+	0x0281, 0x0291, 0x0281, 0x0291,
+	0x02A1, 0x02B1, 0x02C1, 0x0321,
+	0x0321, 0x1321, 0x2321, 0x3321,
+	0x0331, 0x0331, 0x1331, 0x0341,
+	0x0341, 0x1341, 0x2341, 0x3341,
+	0x0351, 0x0361, 0x0361, 0x1361,
+	0x2361, 0x0371, 0x0381, 0x0391,
+	0x03C1, 0x03D1, 0x03E1, 0x03F1,
+};
+
 /**
  * _sde_dbg_enable_power - use callback to turn power on for hw register access
  * @enable: whether to turn power on or off
@@ -3341,7 +3415,8 @@
 
 	if (in_log)
 		dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n",
-				dump_name, addr - base_addr, len_bytes);
+				dump_name, (unsigned long)(addr - base_addr),
+					len_bytes);
 
 	len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
 	len_padded = len_align * REG_DUMP_ALIGN;
@@ -3359,7 +3434,7 @@
 			dev_info(sde_dbg_base.dev,
 				"%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n",
 				dump_name, dump_addr, len_padded,
-				addr - base_addr);
+				(unsigned long)(addr - base_addr));
 		} else {
 			in_mem = 0;
 			pr_err("dump_mem: kzalloc fails!\n");
@@ -3385,7 +3460,8 @@
 		if (in_log)
 			dev_info(sde_dbg_base.dev,
 					"0x%lx : %08x %08x %08x %08x\n",
-					addr - base_addr, x0, x4, x8, xc);
+					(unsigned long)(addr - base_addr),
+					x0, x4, x8, xc);
 
 		if (dump_addr) {
 			dump_addr[i * 4] = x0;
@@ -3412,12 +3488,13 @@
 {
 	u32 length = 0;
 
-	if ((range_node->start > range_node->end) ||
-		(range_node->end > max_offset) || (range_node->start == 0
-		&& range_node->end == 0)) {
+	if (range_node->start == 0 && range_node->end == 0) {
 		length = max_offset;
-	} else {
-		length = range_node->end - range_node->start;
+	} else if (range_node->start < max_offset) {
+		if (range_node->end > max_offset)
+			length = max_offset - range_node->start;
+		else if (range_node->start < range_node->end)
+			length = range_node->end - range_node->start;
 	}
 
 	return length;
@@ -3857,7 +3934,8 @@
 		_sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
 
 	if (sde_dbg_base.dsi_dbg_bus || dump_all)
-		dsi_ctrl_debug_dump();
+		dsi_ctrl_debug_dump(sde_dbg_base.dbgbus_dsi.entries,
+				    sde_dbg_base.dbgbus_dsi.size);
 
 	if (do_panic && sde_dbg_base.panic_on_err)
 		panic(name);
@@ -4018,6 +4096,12 @@
 	/* non-seekable */
 	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
 	file->private_data = inode->i_private;
+	mutex_lock(&sde_dbg_base.mutex);
+	sde_dbg_base.cur_evt_index = 0;
+	sde_dbg_base.evtlog->first = sde_dbg_base.evtlog->curr + 1;
+	sde_dbg_base.evtlog->last =
+		sde_dbg_base.evtlog->first + SDE_EVTLOG_ENTRY;
+	mutex_unlock(&sde_dbg_base.mutex);
 	return 0;
 }
 
@@ -4037,8 +4121,13 @@
 	if (!buff || !ppos)
 		return -EINVAL;
 
-	len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf,
-			SDE_EVTLOG_BUF_MAX, true);
+	mutex_lock(&sde_dbg_base.mutex);
+	len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog,
+			evtlog_buf, SDE_EVTLOG_BUF_MAX,
+			!sde_dbg_base.cur_evt_index, true);
+	sde_dbg_base.cur_evt_index++;
+	mutex_unlock(&sde_dbg_base.mutex);
+
 	if (len < 0 || len > count) {
 		pr_err("len is more than user buffer size\n");
 		return 0;
@@ -4228,6 +4317,329 @@
 	.release =	seq_release
 };
 
+static int sde_recovery_regdump_open(struct inode *inode, struct file *file)
+{
+	if (!inode || !file)
+		return -EINVAL;
+
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+
+	/* initialize to start position */
+	sde_dbg_base.regbuf.rpos = 0;
+	sde_dbg_base.regbuf.cur_blk = NULL;
+	sde_dbg_base.regbuf.dump_done = false;
+
+	return 0;
+}
+
+static ssize_t _sde_dbg_dump_reg_rows(u32 reg_start,
+		void *start, int count, char *buf, int buflen)
+{
+	int i;
+	int len = 0;
+	u32 *addr;
+	u32  reg_offset = 0;
+	int rows = min(count / DUMP_CLMN_COUNT, DUMP_MAX_LINES_PER_BLK);
+
+	if (!start || !buf) {
+		pr_err("invalid address for dump\n");
+		return len;
+	}
+
+	if (buflen < PAGE_SIZE) {
+		pr_err("buffer too small for dump\n");
+		return len;
+	}
+
+	for (i = 0; i < rows; i++) {
+		addr = start + (i * DUMP_CLMN_COUNT * sizeof(u32));
+		reg_offset = reg_start + (i * DUMP_CLMN_COUNT * sizeof(u32));
+		if (buflen < (len + DUMP_LINE_SIZE))
+			break;
+
+		len += snprintf(buf + len, DUMP_LINE_SIZE,
+				"0x%.8X | %.8X %.8X %.8X %.8X\n",
+				reg_offset, addr[0], addr[1], addr[2], addr[3]);
+	}
+
+	return len;
+}
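+
+/*
+ * Each emitted row has the form (illustrative values):
+ *
+ *	0x00000400 | DEADBEEF 00000000 00000001 CAFEBABE
+ *
+ * i.e. the register offset followed by DUMP_CLMN_COUNT register words.
+ */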
+
+static int  _sde_dbg_recovery_dump_sub_blk(struct sde_dbg_reg_range *sub_blk,
+		char  *buf, int buflen)
+{
+	int count = 0;
+	int len = 0;
+
+	if (!sub_blk || (buflen < PAGE_SIZE)) {
+		pr_err("invalid params buflen:%d subblk valid:%d\n",
+				buflen, sub_blk != NULL);
+		return len;
+	}
+
+	count = (sub_blk->offset.end - sub_blk->offset.start) / (sizeof(u32));
+	if (count < DUMP_CLMN_COUNT) {
+		pr_err("invalid count for register dumps :%d\n", count);
+		return len;
+	}
+
+	len += snprintf(buf + len, DUMP_LINE_SIZE,
+			"------------------------------------------\n");
+	len += snprintf(buf + len, DUMP_LINE_SIZE,
+			"**** sub block [%s] - size:%d ****\n",
+			sub_blk->range_name, count);
+	len += _sde_dbg_dump_reg_rows(sub_blk->offset.start, sub_blk->reg_dump,
+			count, buf + len, buflen - len);
+
+	return len;
+}
+
+static int  _sde_dbg_recovery_dump_reg_blk(struct sde_dbg_reg_base *blk,
+		char *buf, int buf_size, int *out_len)
+{
+	int ret = 0;
+	int len = 0;
+	struct sde_dbg_reg_range *sub_blk;
+
+	if (buf_size < PAGE_SIZE) {
+		pr_err("buffer too small for dump\n");
+		return len;
+	}
+
+	if (!blk || !strlen(blk->name)) {
+		len += snprintf(buf + len, DUMP_LINE_SIZE,
+			"Found one invalid block - skip dump\n");
+		*out_len = len;
+		return len;
+	}
+
+	len += snprintf(buf + len, DUMP_LINE_SIZE,
+			"******************************************\n");
+	len += snprintf(buf + len, DUMP_LINE_SIZE,
+			"==========================================\n");
+	len += snprintf(buf + len, DUMP_LINE_SIZE,
+			"*********** DUMP of %s block *************\n",
+			blk->name);
+	len += snprintf(buf + len, DUMP_LINE_SIZE,
+			"count:%ld max-off:0x%lx has_sub_blk:%d\n",
+			blk->cnt, blk->max_offset,
+			!list_empty(&blk->sub_range_list));
+
+	if (list_empty(&blk->sub_range_list)) {
+		len += _sde_dbg_dump_reg_rows(0, blk->reg_dump,
+				blk->max_offset / sizeof(u32), buf + len,
+				buf_size - len);
+	} else {
+		list_for_each_entry(sub_blk, &blk->sub_range_list, head)
+			len += _sde_dbg_recovery_dump_sub_blk(sub_blk,
+					buf + len, buf_size - len);
+	}
+	*out_len = len;
+
+	return ret;
+}
+
+static ssize_t sde_recovery_regdump_read(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	int usize = 0;
+	struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_regbuf *rbuf = &dbg_base->regbuf;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (!rbuf->dump_done && !rbuf->cur_blk) {
+		if (!rbuf->buf)
+			rbuf->buf = kzalloc(DUMP_BUF_SIZE, GFP_KERNEL);
+		if (!rbuf->buf) {
+			len = -ENOMEM;
+			goto err;
+		}
+		rbuf->rpos = 0;
+		rbuf->len = 0;
+		rbuf->buf_size = DUMP_BUF_SIZE;
+
+		rbuf->cur_blk = list_first_entry(&dbg_base->reg_base_list,
+				struct sde_dbg_reg_base, reg_base_head);
+		if (rbuf->cur_blk)
+			_sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk,
+					rbuf->buf,
+					rbuf->buf_size,
+					&rbuf->len);
+		pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ?
+				rbuf->cur_blk->name : "unknown", rbuf->len);
+	} else if (rbuf->len == rbuf->rpos && rbuf->cur_blk) {
+		rbuf->rpos = 0;
+		rbuf->len = 0;
+		rbuf->buf_size = DUMP_BUF_SIZE;
+
+		if (rbuf->cur_blk == list_last_entry(&dbg_base->reg_base_list,
+					struct sde_dbg_reg_base, reg_base_head))
+			rbuf->cur_blk = NULL;
+		else
+			rbuf->cur_blk = list_next_entry(rbuf->cur_blk,
+					reg_base_head);
+
+		if (rbuf->cur_blk)
+			_sde_dbg_recovery_dump_reg_blk(rbuf->cur_blk,
+					rbuf->buf,
+					rbuf->buf_size,
+					&rbuf->len);
+		pr_debug("dumping done for blk:%s len:%d\n", rbuf->cur_blk ?
+				rbuf->cur_blk->name : "unknown", rbuf->len);
+	}
+
+	if ((rbuf->len - rbuf->rpos) > 0) {
+		usize = ((rbuf->len - rbuf->rpos) > count) ?
+			count  : rbuf->len - rbuf->rpos;
+		if (copy_to_user(ubuf, rbuf->buf + rbuf->rpos, usize)) {
+			len = -EFAULT;
+			goto err;
+		}
+
+		len = usize;
+		rbuf->rpos += usize;
+		*ppos += usize;
+	}
+
+	if (!len && rbuf->buf)
+		rbuf->dump_done = true;
+err:
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return len;
+}
+
+static const struct file_operations sde_recovery_reg_fops = {
+	.open = sde_recovery_regdump_open,
+	.read = sde_recovery_regdump_read,
+};
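+
+/*
+ * Reading the "recovery_reg" debugfs node streams the captured register
+ * dump block by block, e.g. (exact path depends on the debugfs mount and
+ * the sde debug root):
+ *
+ *	cat /sys/kernel/debug/.../recovery_reg
+ */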
+
+static int sde_recovery_dbgbus_dump_open(struct inode *inode, struct file *file)
+{
+	if (!inode || !file)
+		return -EINVAL;
+
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	sde_dbg_base.dbgbus_dump_idx = 0;
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return 0;
+}
+
+static ssize_t sde_recovery_dbgbus_dump_read(struct file *file,
+		char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+	u32 *data;
+	struct sde_dbg_sde_debug_bus *bus;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	bus = &sde_dbg_base.dbgbus_sde;
+	if (!bus->cmn.dumped_content || !bus->cmn.entries_size)
+		goto dump_done;
+
+	if (sde_dbg_base.dbgbus_dump_idx <=
+			((bus->cmn.entries_size - 1) * DUMP_CLMN_COUNT)) {
+		data = &bus->cmn.dumped_content[
+			sde_dbg_base.dbgbus_dump_idx];
+		len = snprintf(evtlog_buf, SDE_EVTLOG_BUF_MAX,
+				"0x%.8X | %.8X %.8X %.8X %.8X\n",
+				sde_dbg_base.dbgbus_dump_idx,
+				data[0], data[1], data[2], data[3]);
+		sde_dbg_base.dbgbus_dump_idx += DUMP_CLMN_COUNT;
+		if ((count < len) || copy_to_user(buff, evtlog_buf, len)) {
+			len = -EFAULT;
+			goto dump_done;
+		}
+		*ppos += len;
+	}
+dump_done:
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return len;
+}
+
+static const struct file_operations sde_recovery_dbgbus_fops = {
+	.open = sde_recovery_dbgbus_dump_open,
+	.read = sde_recovery_dbgbus_dump_read,
+};
+
+static int sde_recovery_vbif_dbgbus_dump_open(struct inode *inode,
+		struct file *file)
+{
+	if (!inode || !file)
+		return -EINVAL;
+
+	/* non-seekable */
+	file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+	file->private_data = inode->i_private;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	sde_dbg_base.vbif_dbgbus_dump_idx = 0;
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return 0;
+}
+
+static ssize_t sde_recovery_vbif_dbgbus_dump_read(struct file *file,
+		char __user *buff,
+		size_t count, loff_t *ppos)
+{
+	ssize_t len = 0;
+	char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+	int i;
+	u32 *data;
+	u32 list_size = 0;
+	struct vbif_debug_bus_entry *head;
+	struct sde_dbg_vbif_debug_bus *bus;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	bus = &sde_dbg_base.dbgbus_vbif_rt;
+	if (!bus->cmn.dumped_content || !bus->cmn.entries_size)
+		goto dump_done;
+
+	/* calculate total number of test points */
+	for (i = 0; i < bus->cmn.entries_size; i++) {
+		head = bus->entries + i;
+		list_size += (head->block_cnt * head->test_pnt_cnt);
+	}
+
+	/* 4 entries for each test point */
+	list_size *= DUMP_CLMN_COUNT;
+	if (sde_dbg_base.vbif_dbgbus_dump_idx < list_size) {
+		data = &bus->cmn.dumped_content[
+			sde_dbg_base.vbif_dbgbus_dump_idx];
+		len = snprintf(evtlog_buf, SDE_EVTLOG_BUF_MAX,
+				"0x%.8X | %.8X %.8X %.8X %.8X\n",
+				sde_dbg_base.vbif_dbgbus_dump_idx,
+				data[0], data[1], data[2], data[3]);
+		sde_dbg_base.vbif_dbgbus_dump_idx += DUMP_CLMN_COUNT;
+		if ((count < len) || copy_to_user(buff, evtlog_buf, len)) {
+			len = -EFAULT;
+			goto dump_done;
+		}
+		*ppos += len;
+	}
+dump_done:
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return len;
+}
+
+static const struct file_operations sde_recovery_vbif_dbgbus_fops = {
+	.open = sde_recovery_vbif_dbgbus_dump_open,
+	.read = sde_recovery_vbif_dbgbus_dump_read,
+};
+
 /**
  * sde_dbg_reg_base_release - release allocated reg dump file private data
  * @inode: debugfs inode
@@ -4256,6 +4668,37 @@
 	return 0;
 }
 
+/**
+ * sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if valid; false otherwise
+ */
+static bool sde_dbg_reg_base_is_valid_range(u32 off, u32 cnt)
+{
+	static struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_range *node;
+	struct sde_dbg_reg_base *base;
+
+	pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+	list_for_each_entry(base, &dbg_base->reg_base_list, reg_base_head) {
+		list_for_each_entry(node, &base->sub_range_list, head) {
+			pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+					node->offset.start, node->offset.end);
+
+			if (node->offset.start <= off
+					&& off <= node->offset.end
+					&& off + cnt <= node->offset.end) {
+				pr_debug("valid range requested\n");
+				return true;
+			}
+		}
+	}
+
+	pr_err("invalid range requested\n");
+	return false;
+}
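+
+/*
+ * For example (hypothetical range): with a sub-range spanning offsets
+ * [0x400, 0x800], a request of off=0x400 cnt=0x100 is accepted, while
+ * off=0x7f0 cnt=0x100 is rejected because it crosses the range end.
+ */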
 
 /**
  * sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
@@ -4302,6 +4745,9 @@
 	if (cnt == 0)
 		return -EINVAL;
 
+	if (!sde_dbg_reg_base_is_valid_range(off, cnt))
+		return -EINVAL;
+
 	mutex_lock(&sde_dbg_base.mutex);
 	dbg->off = off;
 	dbg->cnt = cnt;
@@ -4556,6 +5002,12 @@
 			&sde_dbg_base.panic_on_err);
 	debugfs_create_u32("reg_dump", 0600, debugfs_root,
 			&sde_dbg_base.enable_reg_dump);
+	debugfs_create_file("recovery_reg", 0400, debugfs_root, NULL,
+			&sde_recovery_reg_fops);
+	debugfs_create_file("recovery_dbgbus", 0400, debugfs_root, NULL,
+			&sde_recovery_dbgbus_fops);
+	debugfs_create_file("recovery_vbif_dbgbus", 0400, debugfs_root, NULL,
+			&sde_recovery_vbif_dbgbus_fops);
 
 	if (dbg->dbgbus_sde.entries) {
 		dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE;
@@ -4609,6 +5061,8 @@
 		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
 		dbg->dbgbus_vbif_rt.cmn.entries_size =
 				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+		dbg->dbgbus_dsi.entries = NULL;
+		dbg->dbgbus_dsi.size = 0;
 	} else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
 		dbg->dbgbus_sde.entries = dbg_bus_sde_sdm845;
 		dbg->dbgbus_sde.cmn.entries_size =
@@ -4619,7 +5073,10 @@
 		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
 		dbg->dbgbus_vbif_rt.cmn.entries_size =
 				ARRAY_SIZE(vbif_dbg_bus_msm8998);
-	} else if (IS_SM8150_TARGET(hwversion)) {
+		dbg->dbgbus_dsi.entries = dsi_dbg_bus_sdm845;
+		dbg->dbgbus_dsi.size = ARRAY_SIZE(dsi_dbg_bus_sdm845);
+	} else if (IS_SM8150_TARGET(hwversion) || IS_SM6150_TARGET(hwversion) ||
+		   IS_KONA_TARGET(hwversion)) {
 		dbg->dbgbus_sde.entries = dbg_bus_sde_sm8150;
 		dbg->dbgbus_sde.cmn.entries_size =
 				ARRAY_SIZE(dbg_bus_sde_sm8150);
@@ -4628,6 +5085,8 @@
 		dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
 		dbg->dbgbus_vbif_rt.cmn.entries_size =
 				ARRAY_SIZE(vbif_dbg_bus_msm8998);
+		dbg->dbgbus_dsi.entries = NULL;
+		dbg->dbgbus_dsi.size = 0;
 	} else {
 		pr_err("unsupported chipset id %X\n", hwversion);
 	}
@@ -4655,6 +5114,7 @@
 	sde_dbg_base.work_panic = false;
 	sde_dbg_base.panic_on_err = DEFAULT_PANIC;
 	sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP;
+	memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
 
 	pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
 		sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err,
@@ -4688,6 +5148,8 @@
  */
 void sde_dbg_destroy(void)
 {
+	kfree(sde_dbg_base.regbuf.buf);
+	memset(&sde_dbg_base.regbuf, 0, sizeof(sde_dbg_base.regbuf));
 	_sde_dbg_debugfs_destroy();
 	sde_dbg_base_evtlog = NULL;
 	sde_evtlog_destroy(sde_dbg_base.evtlog);
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 2c7925c..a1bfb47 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -201,11 +201,12 @@
  * @evtlog_buf:		target buffer to print into
  * @evtlog_buf_size:	size of target buffer
  * @update_last_entry:	whether or not to stop at most recent entry
+ * @full_dump:          whether to dump the full log or limit printed entries
  * Returns:		number of bytes written to buffer
  */
 ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
 		char *evtlog_buf, ssize_t evtlog_buf_size,
-		bool update_last_entry);
+		bool update_last_entry, bool full_dump);
 
 /**
  * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
@@ -329,16 +330,24 @@
 int sde_evtlog_get_filter(struct sde_dbg_evtlog *evtlog, int index,
 		char *buf, size_t bufsz);
 
+#ifndef CONFIG_DRM_SDE_RSC
+static inline void sde_rsc_debug_dump(u32 mux_sel)
+{
+}
+#else
 /**
  * sde_rsc_debug_dump - sde rsc debug dump status
  * @mux_sel:	select mux on rsc debug bus
  */
 void sde_rsc_debug_dump(u32 mux_sel);
+#endif
 
 /**
  * dsi_ctrl_debug_dump - dump dsi debug dump status
+ * @entries:	array of debug bus control values
+ * @size:	size of the debug bus control array
  */
-void dsi_ctrl_debug_dump(void);
+void dsi_ctrl_debug_dump(u32 *entries, u32 size);
 
 #else
 static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
@@ -430,7 +439,7 @@
 {
 }
 
-static inline void dsi_ctrl_debug_dump(void)
+static inline void dsi_ctrl_debug_dump(u32 *entries, u32 size)
 {
 }
 
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
index 8f1da27..22e65cc 100644
--- a/drivers/gpu/drm/msm/sde_dbg_evtlog.c
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -101,8 +101,10 @@
 
 /* always dump the last entries which are not dumped yet */
 static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
-		bool update_last_entry)
+		bool update_last_entry, bool full_dump)
 {
+	int max_entries = full_dump ? SDE_EVTLOG_ENTRY : SDE_EVTLOG_PRINT_ENTRY;
+
 	if (!evtlog)
 		return false;
 
@@ -120,12 +122,11 @@
 			evtlog->last_dump += SDE_EVTLOG_ENTRY;
 	}
 
-	if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+	if ((evtlog->last_dump - evtlog->first) > max_entries) {
 		pr_info("evtlog skipping %d entries, last=%d\n",
 			evtlog->last_dump - evtlog->first -
-			SDE_EVTLOG_PRINT_ENTRY,
-			evtlog->last_dump - 1);
-		evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY;
+			max_entries, evtlog->last_dump - 1);
+		evtlog->first = evtlog->last_dump - max_entries;
 	}
 	evtlog->next = evtlog->first + 1;
 
@@ -134,7 +135,7 @@
 
 ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
 		char *evtlog_buf, ssize_t evtlog_buf_size,
-		bool update_last_entry)
+		bool update_last_entry, bool full_dump)
 {
 	int i;
 	ssize_t off = 0;
@@ -147,7 +148,7 @@
 	spin_lock_irqsave(&evtlog->spin_lock, flags);
 
 	/* update markers, exit if nothing to print */
-	if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry))
+	if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry, full_dump))
 		goto exit;
 
 	log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY];
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index 3de34da..456b0f7 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -172,8 +172,9 @@
 	 * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
 	 *   collection present and no DTD data present.
 	 */
+
 	if ((dbc_offset == 0) || (dbc_offset == 4)) {
-		SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+		SDE_EDID_DEBUG("EDID: no DTD or non-DTD data present\n");
 		return NULL;
 	}
 
@@ -376,7 +377,7 @@
 	edid_ext = sde_find_cea_extension(edid_ctrl->edid);
 
 	if (!edid_ext) {
-		SDE_ERROR("no cea extension\n");
+		SDE_DEBUG("no cea extension\n");
 		return;
 	}
 
@@ -545,7 +546,7 @@
 
 	SDE_EDID_DEBUG("%s +", __func__);
 	if (edid_ctrl->edid) {
-		drm_mode_connector_update_edid_property(connector,
+		drm_connector_update_edid_property(connector,
 			edid_ctrl->edid);
 
 		rc = drm_add_edid_modes(connector, edid_ctrl->edid);
@@ -555,7 +556,7 @@
 		return rc;
 	}
 
-	drm_mode_connector_update_edid_property(connector, NULL);
+	drm_connector_update_edid_property(connector, NULL);
 	SDE_EDID_DEBUG("%s null edid -", __func__);
 	return rc;
 }
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index ff5c018..90fc426 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -33,8 +33,9 @@
 
 enum sde_hdcp_version {
 	HDCP_VERSION_NONE,
-	HDCP_VERSION_1X,
-	HDCP_VERSION_2P2
+	HDCP_VERSION_1X = BIT(0),
+	HDCP_VERSION_2P2 = BIT(1),
+	HDCP_VERSION_MAX = BIT(2),
 };
 
 struct sde_hdcp_init_data {
@@ -64,6 +65,8 @@
 	int (*reauthenticate)(void *input);
 	int (*authenticate)(void *hdcp_ctrl);
 	bool (*feature_supported)(void *input);
+	void (*force_encryption)(void *input, bool enable);
+	bool (*sink_support)(void *input);
 	void (*off)(void *hdcp_ctrl);
 };
 
@@ -90,8 +93,8 @@
 
 void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data);
 void sde_hdcp_1x_deinit(void *input);
-struct sde_hdcp_ops *sde_hdcp_1x_start(void *input);
+struct sde_hdcp_ops *sde_hdcp_1x_get(void *input);
 void *sde_dp_hdcp2p2_init(struct sde_hdcp_init_data *init_data);
 void sde_dp_hdcp2p2_deinit(void *input);
-struct sde_hdcp_ops *sde_dp_hdcp2p2_start(void *input);
+struct sde_hdcp_ops *sde_dp_hdcp2p2_get(void *input);
 #endif /* __SDE_HDCP_H__ */
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
index 857b064..2cb84ec 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_1x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -14,7 +14,7 @@
 #include <linux/msm_hdcp.h>
 #include <drm/drm_dp_helper.h>
 #include "sde_hdcp.h"
-#include "hdmi.xml.h"
+#include "hdmi/hdmi.xml.h"
 #include "video/msm_hdmi_hdcp_mgr.h"
 #include "dp/dp_reg.h"
 
@@ -203,6 +203,7 @@
 	u8 bcaps;
 	u32 tp_msgid;
 	u32 an_0, an_1, aksv_0, aksv_1;
+	u32 aksv_msb, aksv_lsb;
 	bool sink_r0_ready;
 	bool reauth;
 	bool ksv_ready;
@@ -230,11 +231,9 @@
 	return count;
 }
 
-static int sde_hdcp_1x_load_keys(void *input)
+static int sde_hdcp_1x_enable_hdcp_engine(void *input)
 {
 	int rc = 0;
-	u32 aksv_lsb, aksv_msb;
-	u8 aksv[5];
 	struct dss_io_data *dp_ahb;
 	struct dss_io_data *dp_aux;
 	struct dss_io_data *dp_link;
@@ -262,30 +261,8 @@
 	dp_link = hdcp->init_data.dp_link;
 	reg_set = &hdcp->reg_set;
 
-	if (hdcp1_set_keys(hdcp->hdcp1_handle, &aksv_msb, &aksv_lsb)) {
-		pr_err("setting hdcp SW keys failed\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	pr_debug("%s: AKSV=%02x%08x\n", SDE_HDCP_STATE_NAME,
-		aksv_msb, aksv_lsb);
-
-	aksv[0] =  aksv_lsb        & 0xFF;
-	aksv[1] = (aksv_lsb >> 8)  & 0xFF;
-	aksv[2] = (aksv_lsb >> 16) & 0xFF;
-	aksv[3] = (aksv_lsb >> 24) & 0xFF;
-	aksv[4] =  aksv_msb        & 0xFF;
-
-	/* check there are 20 ones in AKSV */
-	if (sde_hdcp_1x_count_one(aksv, 5) != 20) {
-		pr_err("AKSV bit count failed\n");
-		rc = -EINVAL;
-		goto end;
-	}
-
-	DSS_REG_W(dp_aux, reg_set->aksv_lsb, aksv_lsb);
-	DSS_REG_W(dp_aux, reg_set->aksv_msb, aksv_msb);
+	DSS_REG_W(dp_aux, reg_set->aksv_lsb, hdcp->aksv_lsb);
+	DSS_REG_W(dp_aux, reg_set->aksv_msb, hdcp->aksv_msb);
 
 	/* Setup seed values for random number An */
 	DSS_REG_W(dp_link, reg_set->entropy_ctrl0, 0xB1FFB0FF);
@@ -1135,20 +1112,29 @@
 static int sde_hdcp_1x_authenticate(void *input)
 {
 	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	int rc = 0;
 
 	if (!hdcp) {
 		pr_err("invalid input\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto error;
 	}
 
 	flush_delayed_work(&hdcp->hdcp_auth_work);
 
 	if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
 		pr_err("invalid state\n");
-		return -EINVAL;
+		rc = -EINVAL;
+		goto error;
 	}
 
-	if (!sde_hdcp_1x_load_keys(input)) {
+	rc = hdcp1_start(hdcp->hdcp1_handle, &hdcp->aksv_msb, &hdcp->aksv_lsb);
+	if (rc) {
+		pr_err("hdcp1_start failed (%d)\n", rc);
+		goto error;
+	}
+
+	if (!sde_hdcp_1x_enable_hdcp_engine(input)) {
 
 		queue_delayed_work(hdcp->workq,
 			&hdcp->hdcp_auth_work, HZ/2);
@@ -1157,7 +1143,8 @@
 		sde_hdcp_1x_update_auth_status(hdcp);
 	}
 
-	return 0;
+error:
+	return rc;
 } /* hdcp_1x_authenticate */
 
 static int sde_hdcp_1x_reauthenticate(void *input)
@@ -1166,7 +1153,7 @@
 	struct dss_io_data *io;
 	struct sde_hdcp_reg_set *reg_set;
 	struct sde_hdcp_int_set *isr;
-	u32 ret = 0, reg;
+	u32 reg;
 
 	if (!hdcp || !hdcp->init_data.dp_ahb) {
 		pr_err("invalid input\n");
@@ -1194,9 +1181,8 @@
 	DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
 
 	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
-	sde_hdcp_1x_authenticate(hdcp);
 
-	return ret;
+	return sde_hdcp_1x_authenticate(hdcp);
 } /* hdcp_1x_reauthenticate */
 
 static void sde_hdcp_1x_off(void *input)
@@ -1257,6 +1243,8 @@
 
 	hdcp->sink_r0_ready = false;
 
+	hdcp1_stop(hdcp->hdcp1_handle);
+
 	pr_debug("%s: HDCP: Off\n", SDE_HDCP_STATE_NAME);
 } /* hdcp_1x_off */
 
@@ -1358,13 +1346,23 @@
 static bool sde_hdcp_1x_feature_supported(void *input)
 {
 	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	bool feature_supported = false;
 
 	if (!hdcp) {
 		pr_err("invalid input\n");
 		return -EINVAL;
 	}
 
-	return hdcp1_feature_supported(hdcp->hdcp1_handle);
+	feature_supported = hdcp1_feature_supported(hdcp->hdcp1_handle);
+
+	pr_debug("feature_supported = %d\n", feature_supported);
+
+	return feature_supported;
+}
+
+static bool sde_hdcp_1x_sink_support(void *input)
+{
+	return true;
 }
 
 void sde_hdcp_1x_deinit(void *input)
@@ -1485,6 +1483,7 @@
 		.reauthenticate = sde_hdcp_1x_reauthenticate,
 		.authenticate = sde_hdcp_1x_authenticate,
 		.feature_supported = sde_hdcp_1x_feature_supported,
+		.sink_support = sde_hdcp_1x_sink_support,
 		.off = sde_hdcp_1x_off
 	};
 
@@ -1541,7 +1540,7 @@
 	return NULL;
 } /* hdcp_1x_init */
 
-struct sde_hdcp_ops *sde_hdcp_1x_start(void *input)
+struct sde_hdcp_ops *sde_hdcp_1x_get(void *input)
 {
 	return ((struct sde_hdcp_1x *)input)->ops;
 }
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c
index 99532da..82f50cb 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c
@@ -48,10 +48,14 @@
 struct sde_hdcp_2x_ctrl {
 	struct hdcp2_app_data app_data;
 	u32 timeout_left;
+	u32 wait_timeout_ms;
 	u32 total_message_length;
 	bool no_stored_km;
 	bool feature_supported;
+	bool force_encryption;
 	bool authenticated;
+	bool resend_lc_init;
+	bool resend_stream_manage;
 	void *client_data;
 	void *hdcp2_ctx;
 	struct hdcp_transport_ops *client_ops;
@@ -64,7 +68,7 @@
 	enum sde_hdcp_2x_device_type device_type;
 
 	struct task_struct *thread;
-	struct completion topo_wait;
+	struct completion response_completion;
 
 	struct kthread_worker worker;
 	struct kthread_work wk_init;
@@ -73,6 +77,7 @@
 	struct kthread_work wk_timeout;
 	struct kthread_work wk_clean;
 	struct kthread_work wk_stream;
+	struct kthread_work wk_wait;
 };
 
 static const char *sde_hdcp_2x_message_name(int msg_id)
@@ -210,7 +215,10 @@
 	case LC_INIT:
 		return LC_SEND_L_PRIME;
 	case LC_SEND_L_PRIME:
-		return SKE_SEND_EKS;
+		if (hdcp->resend_lc_init)
+			return LC_INIT;
+		else
+			return SKE_SEND_EKS;
 	case SKE_SEND_EKS:
 		if (!hdcp->repeater_flag)
 			return SKE_SEND_TYPE_ID;
@@ -227,40 +235,72 @@
 	case REP_SEND_RECV_ID_LIST:
 		return REP_SEND_ACK;
 	case REP_STREAM_MANAGE:
-		return REP_STREAM_READY;
+		if (hdcp->resend_stream_manage)
+			return REP_STREAM_MANAGE;
+		else
+			return REP_STREAM_READY;
 	default:
 		pr_err("Unknown message ID (%d)\n", hdcp->last_msg);
 		return -EINVAL;
 	}
 }
 
+static void sde_hdcp_2x_wait_for_response(struct sde_hdcp_2x_ctrl *hdcp)
+{
+	switch (hdcp->last_msg) {
+	case AKE_SEND_H_PRIME:
+		if (hdcp->no_stored_km)
+			hdcp->wait_timeout_ms = HZ;
+		else
+			hdcp->wait_timeout_ms = HZ / 4;
+		break;
+	case AKE_SEND_PAIRING_INFO:
+		hdcp->wait_timeout_ms = HZ / 4;
+		break;
+	case REP_SEND_RECV_ID_LIST:
+		if (!hdcp->authenticated)
+			hdcp->wait_timeout_ms = HZ * 3;
+		else
+			hdcp->wait_timeout_ms = 0;
+		break;
+	default:
+		hdcp->wait_timeout_ms = 0;
+	}
+
+	if (hdcp->wait_timeout_ms)
+		HDCP_2X_EXECUTE(wait);
+}
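/*
 * For reference: wait_for_completion_timeout() takes jiffies and returns
 * 0 on expiry, so despite the "_ms" suffix the values chosen above are
 * jiffies - HZ is 1 s, HZ / 4 is 250 ms, and HZ * 3 is 3 s. The worker
 * queued through HDCP_2X_EXECUTE(wait) consumes the value unchanged; see
 * sde_hdcp_2x_wait_for_response_work() later in this file.
 */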
+
 static void sde_hdcp_2x_wakeup_client(struct sde_hdcp_2x_ctrl *hdcp,
 				struct hdcp_transport_wakeup_data *data)
 {
 	int rc = 0;
 
-	if (hdcp && hdcp->client_ops && hdcp->client_ops->wakeup &&
-	    data && (data->cmd != HDCP_TRANSPORT_CMD_INVALID)) {
-		data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE;
+	if (!hdcp || !hdcp->client_ops || !hdcp->client_ops->wakeup ||
+			!data || (data->cmd == HDCP_TRANSPORT_CMD_INVALID))
+		return;
 
-		if (data->cmd == HDCP_TRANSPORT_CMD_SEND_MESSAGE ||
-		    data->cmd == HDCP_TRANSPORT_CMD_RECV_MESSAGE ||
-		    data->cmd == HDCP_TRANSPORT_CMD_LINK_POLL) {
-			hdcp->last_msg =
-				sde_hdcp_2x_get_next_message(hdcp, data);
-			if (hdcp->last_msg <= INVALID_MESSAGE) {
-				hdcp->last_msg = INVALID_MESSAGE;
-				return;
-			}
+	data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE;
 
-			data->message_data = &hdcp_msg_lookup[hdcp->last_msg];
+	if (data->cmd == HDCP_TRANSPORT_CMD_SEND_MESSAGE ||
+			data->cmd == HDCP_TRANSPORT_CMD_RECV_MESSAGE ||
+			data->cmd == HDCP_TRANSPORT_CMD_LINK_POLL) {
+		hdcp->last_msg =
+			sde_hdcp_2x_get_next_message(hdcp, data);
+		if (hdcp->last_msg <= INVALID_MESSAGE) {
+			hdcp->last_msg = INVALID_MESSAGE;
+			return;
 		}
 
-		rc = hdcp->client_ops->wakeup(data);
-		if (rc)
-			pr_err("error sending %s to client\n",
-			       hdcp_transport_cmd_to_str(data->cmd));
+		data->message_data = &hdcp_msg_lookup[hdcp->last_msg];
 	}
+
+	rc = hdcp->client_ops->wakeup(data);
+	if (rc)
+		pr_err("error sending %s to client\n",
+				hdcp_transport_cmd_to_str(data->cmd));
+
+	sde_hdcp_2x_wait_for_response(hdcp);
 }
 
 static inline void sde_hdcp_2x_send_message(struct sde_hdcp_2x_ctrl *hdcp)
@@ -293,6 +333,19 @@
 	return hdcp2_feature_supported(hdcp->hdcp2_ctx);
 }
 
+static void sde_hdcp_2x_force_encryption(void *data, bool enable)
+{
+	struct sde_hdcp_2x_ctrl *hdcp = data;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	hdcp->force_encryption = enable;
+	pr_info("force_encryption=%d\n", hdcp->force_encryption);
+}
+
 static int sde_hdcp_2x_check_valid_state(struct sde_hdcp_2x_ctrl *hdcp)
 {
 	int rc = 0;
@@ -403,6 +456,9 @@
 				HDCP2_CMD_EN_ENCRYPTION, &hdcp->app_data)) {
 			hdcp->authenticated = true;
 
+			if (hdcp->force_encryption)
+				hdcp2_force_encryption(hdcp->hdcp2_ctx, 1);
+
 			cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_SUCCESS;
 			sde_hdcp_2x_wakeup_client(hdcp, &cdata);
 		}
@@ -425,7 +481,8 @@
 		}
 		break;
 	case REP_SEND_ACK:
-		pr_debug("Repeater authentication successful\n");
+		pr_debug("Repeater authentication successful. update_stream=%d\n",
+				hdcp->update_stream);
 
 		if (hdcp->update_stream) {
 			HDCP_2X_EXECUTE(stream);
@@ -565,7 +622,8 @@
 	rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_PROCESS_MSG,
 			&hdcp->app_data);
 	if (rc) {
-		pr_err("failed to process message from sink (%d)\n", rc);
+		pr_err("failed to process sink's response to %s (%d)\n",
+				sde_hdcp_2x_message_name(msg[0]), rc);
 		rc = -EINVAL;
 		goto exit;
 	}
@@ -577,7 +635,12 @@
 		goto exit;
 	}
 
-	if (msg[0] == REP_STREAM_READY) {
+	out_msg = (u32)hdcp->app_data.response.data[0];
+
+	pr_debug("message received from TZ: %s\n",
+			sde_hdcp_2x_message_name(out_msg));
+
+	if (msg[0] == REP_STREAM_READY && out_msg != REP_STREAM_MANAGE) {
 		if (!hdcp->authenticated) {
 			rc = hdcp2_app_comm(hdcp->hdcp2_ctx,
 					HDCP2_CMD_EN_ENCRYPTION,
@@ -585,6 +648,10 @@
 			if (!rc) {
 				hdcp->authenticated = true;
 
+				if (hdcp->force_encryption)
+					hdcp2_force_encryption(
+							hdcp->hdcp2_ctx, 1);
+
 				cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_SUCCESS;
 				sde_hdcp_2x_wakeup_client(hdcp, &cdata);
 			} else {
@@ -598,10 +665,17 @@
 		goto exit;
 	}
 
-	out_msg = (u32)hdcp->app_data.response.data[0];
+	hdcp->resend_lc_init = false;
+	if (msg[0] == LC_SEND_L_PRIME && out_msg == LC_INIT) {
+		pr_debug("resend %s\n", sde_hdcp_2x_message_name(out_msg));
+		hdcp->resend_lc_init = true;
+	}
 
-	pr_debug("message received from TZ: %s\n",
-			sde_hdcp_2x_message_name(out_msg));
+	hdcp->resend_stream_manage = false;
+	if (msg[0] == REP_STREAM_READY && out_msg == REP_STREAM_MANAGE) {
+		pr_debug("resend %s\n", sde_hdcp_2x_message_name(out_msg));
+		hdcp->resend_stream_manage = true;
+	}
 
 	if (out_msg == AKE_NO_STORED_KM)
 		hdcp->no_stored_km = true;
@@ -636,6 +710,36 @@
 	sde_hdcp_2x_msg_recvd(hdcp);
 }
 
+static void sde_hdcp_2x_wait_for_response_work(struct kthread_work *work)
+{
+	u32 timeout;
+	struct sde_hdcp_2x_ctrl *hdcp = container_of(work,
+			struct sde_hdcp_2x_ctrl, wk_wait);
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&hdcp->hdcp_off)) {
+		pr_debug("invalid state: hdcp off\n");
+		return;
+	}
+
+	reinit_completion(&hdcp->response_completion);
+	timeout = wait_for_completion_timeout(&hdcp->response_completion,
+			hdcp->wait_timeout_ms);
+	if (!timeout) {
+		pr_err("completion expired, last message = %s\n",
+				sde_hdcp_2x_message_name(hdcp->last_msg));
+
+		if (!atomic_read(&hdcp->hdcp_off))
+			HDCP_2X_EXECUTE(clean);
+	}
+
+	hdcp->wait_timeout_ms = 0;
+}
+
 static int sde_hdcp_2x_wakeup(struct sde_hdcp_2x_wakeup_data *data)
 {
 	struct sde_hdcp_2x_ctrl *hdcp;
@@ -657,11 +761,14 @@
 	pr_debug("%s\n", sde_hdcp_2x_cmd_to_str(hdcp->wakeup_cmd));
 
 	rc = sde_hdcp_2x_check_valid_state(hdcp);
-	if (rc)
+	if (rc) {
+		pr_err("invalid state for command=%s\n",
+				sde_hdcp_2x_cmd_to_str(hdcp->wakeup_cmd));
 		goto exit;
+	}
 
-	if (!completion_done(&hdcp->topo_wait))
-		complete_all(&hdcp->topo_wait);
+	if (!completion_done(&hdcp->response_completion))
+		complete_all(&hdcp->response_completion);
 
 	switch (hdcp->wakeup_cmd) {
 	case HDCP_2X_CMD_START:
@@ -676,7 +783,6 @@
 		break;
 	case HDCP_2X_CMD_STOP:
 		atomic_set(&hdcp->hdcp_off, 1);
-
 		HDCP_2X_EXECUTE(clean);
 		break;
 	case HDCP_2X_CMD_MSG_SEND_SUCCESS:
@@ -684,6 +790,7 @@
 		break;
 	case HDCP_2X_CMD_MSG_SEND_FAILED:
 	case HDCP_2X_CMD_MSG_RECV_FAILED:
+	case HDCP_2X_CMD_LINK_FAILED:
 		HDCP_2X_EXECUTE(clean);
 		break;
 	case HDCP_2X_CMD_MSG_RECV_SUCCESS:
@@ -732,6 +839,7 @@
 	/* populate ops to be called by client */
 	data->ops->feature_supported = sde_hdcp_2x_client_feature_supported;
 	data->ops->wakeup = sde_hdcp_2x_wakeup;
+	data->ops->force_encryption = sde_hdcp_2x_force_encryption;
 
 	hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL);
 	if (!hdcp) {
@@ -757,8 +865,9 @@
 	kthread_init_work(&hdcp->wk_timeout,   sde_hdcp_2x_timeout_work);
 	kthread_init_work(&hdcp->wk_clean,     sde_hdcp_2x_cleanup_work);
 	kthread_init_work(&hdcp->wk_stream,    sde_hdcp_2x_query_stream_work);
+	kthread_init_work(&hdcp->wk_wait, sde_hdcp_2x_wait_for_response_work);
 
-	init_completion(&hdcp->topo_wait);
+	init_completion(&hdcp->response_completion);
 
 	*data->hdcp_data = hdcp;
 
@@ -772,6 +881,8 @@
 		goto error;
 	}
 
+	hdcp->force_encryption = false;
+
 	return 0;
 error:
 	kzfree(hdcp);
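
For illustration, a client holding the ops populated above could force encryption once the feature check passes; "ops" and "hdcp2_data" are placeholder names for whatever the registration call handed back:

	if (ops->feature_supported(hdcp2_data))
		ops->force_encryption(hdcp2_data, true);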
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h
index 94c46af..33828e9 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.h
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.h
@@ -174,7 +174,8 @@
 
 struct sde_hdcp_2x_ops {
 	int (*wakeup)(struct sde_hdcp_2x_wakeup_data *data);
-	bool (*feature_supported)(void *phdcpcontext);
+	bool (*feature_supported)(void *data);
+	void (*force_encryption)(void *data, bool enable);
 };
 
 struct hdcp_transport_ops {
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
index cba2ca8..e51f216 100644
--- a/drivers/gpu/drm/msm/sde_io_util.c
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -235,7 +235,7 @@
 			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
 			if (in_vreg[i].pre_on_sleep && need_sleep)
 				usleep_range(in_vreg[i].pre_on_sleep * 1000,
-					in_vreg[i].pre_on_sleep * 1000);
+					(in_vreg[i].pre_on_sleep * 1000) + 10);
 			rc = regulator_set_load(in_vreg[i].vreg,
 				in_vreg[i].enable_load);
 			if (rc < 0) {
@@ -247,7 +247,7 @@
 			rc = regulator_enable(in_vreg[i].vreg);
 			if (in_vreg[i].post_on_sleep && need_sleep)
 				usleep_range(in_vreg[i].post_on_sleep * 1000,
-					in_vreg[i].post_on_sleep * 1000);
+					(in_vreg[i].post_on_sleep * 1000) + 10);
 			if (rc < 0) {
 				DEV_ERR("%pS->%s: %s enable failed\n",
 					__builtin_return_address(0), __func__,
@@ -270,13 +270,13 @@
 			}
 			if (in_vreg[i].pre_off_sleep)
 				usleep_range(in_vreg[i].pre_off_sleep * 1000,
-					in_vreg[i].pre_off_sleep * 1000);
+					(in_vreg[i].pre_off_sleep * 1000) + 10);
 			regulator_set_load(in_vreg[i].vreg,
 				in_vreg[i].disable_load);
 			regulator_disable(in_vreg[i].vreg);
 			if (in_vreg[i].post_off_sleep)
 				usleep_range(in_vreg[i].post_off_sleep * 1000,
-					in_vreg[i].post_off_sleep * 1000);
+					(in_vreg[i].post_off_sleep * 1000) + 10);
 		}
 	}
 	return rc;
@@ -288,13 +288,13 @@
 	for (i--; i >= 0; i--) {
 		if (in_vreg[i].pre_off_sleep)
 			usleep_range(in_vreg[i].pre_off_sleep * 1000,
-				in_vreg[i].pre_off_sleep * 1000);
+				(in_vreg[i].pre_off_sleep * 1000) + 10);
 		regulator_set_load(in_vreg[i].vreg,
 			in_vreg[i].disable_load);
 		regulator_disable(in_vreg[i].vreg);
 		if (in_vreg[i].post_off_sleep)
 			usleep_range(in_vreg[i].post_off_sleep * 1000,
-				in_vreg[i].post_off_sleep * 1000);
+				(in_vreg[i].post_off_sleep * 1000) + 10);
 	}
 
 	return rc;
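
The usleep_range() fixes above follow the kernel guidance that min and max should not be identical: a non-zero window gives the hrtimer subsystem slack to coalesce wakeups. For a 2 ms pre-off sleep, the call now effectively reads:

	usleep_range(2 * 1000, (2 * 1000) + 10);	/* 2000..2010 us window */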
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index a789208..2a81d62 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -634,9 +634,13 @@
 {
 	int rc = 0;
 
-	if (reg_bus_hdl)
+	if (reg_bus_hdl) {
+		SDE_ATRACE_BEGIN("msm_bus_scale_req");
 		rc = msm_bus_scale_client_update_request(reg_bus_hdl,
 								usecase_ndx);
+		SDE_ATRACE_END("msm_bus_scale_req");
+	}
+
 	if (rc)
 		pr_err("failed to set reg bus vote rc=%d\n", rc);
 
@@ -753,6 +757,12 @@
 		}
 	}
 
+	if (of_find_property(pdev->dev.of_node, "qcom,dss-cx-ipeak", NULL))
+		phandle->dss_cx_ipeak = cx_ipeak_register(pdev->dev.of_node,
+						"qcom,dss-cx-ipeak");
+	else
+		pr_debug("cx ipeak client not defined\n");
+
 	INIT_LIST_HEAD(&phandle->power_client_clist);
 	INIT_LIST_HEAD(&phandle->event_list);
 
@@ -817,6 +827,9 @@
 	}
 	mutex_unlock(&phandle->phandle_lock);
 
+	if (phandle->dss_cx_ipeak)
+		cx_ipeak_unregister(phandle->dss_cx_ipeak);
+
 	for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
 		sde_power_data_bus_unregister(&phandle->data_bus_handle[i]);
 
@@ -839,6 +852,68 @@
 		sde_rsc_client_destroy(phandle->rsc_client);
 }
 
+
+int sde_power_scale_reg_bus(struct sde_power_handle *phandle,
+	struct sde_power_client *pclient, u32 usecase_ndx, bool skip_lock)
+{
+	struct sde_power_client *client;
+	int rc = 0;
+	u32 max_usecase_ndx = VOTE_INDEX_DISABLE;
+
+	if (!skip_lock) {
+		mutex_lock(&phandle->phandle_lock);
+
+		if (WARN_ON(pclient->refcount == 0)) {
+			/*
+			 * This is not expected: clients calling without
+			 * skip_lock are outside the power resource enable
+			 * path and should have enabled the power resource
+			 * before trying to scale.
+			 */
+			rc = -EINVAL;
+			goto exit;
+		}
+	}
+
+	pr_debug("%pS: current idx:%d requested:%d client:%d\n",
+		__builtin_return_address(0), pclient->usecase_ndx,
+		usecase_ndx, pclient->id);
+
+	pclient->usecase_ndx = usecase_ndx;
+
+	list_for_each_entry(client, &phandle->power_client_clist, list) {
+		if (client->usecase_ndx < VOTE_INDEX_MAX &&
+		    client->usecase_ndx > max_usecase_ndx)
+			max_usecase_ndx = client->usecase_ndx;
+	}
+
+	rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
+						max_usecase_ndx);
+	if (rc)
+		pr_err("failed to set reg bus vote rc=%d\n", rc);
+
+exit:
+	if (!skip_lock)
+		mutex_unlock(&phandle->phandle_lock);
+
+	return rc;
+}
+
+static inline bool _resource_changed(u32 current_usecase_ndx,
+		u32 max_usecase_ndx)
+{
+	WARN_ON((current_usecase_ndx >= VOTE_INDEX_MAX)
+		|| (max_usecase_ndx >= VOTE_INDEX_MAX));
+
+	if (((current_usecase_ndx >= VOTE_INDEX_LOW) && /* enabled */
+		(max_usecase_ndx == VOTE_INDEX_DISABLE)) || /* max disabled */
+		((current_usecase_ndx == VOTE_INDEX_DISABLE) && /* disabled */
+		(max_usecase_ndx >= VOTE_INDEX_LOW))) /* max enabled */
+		return true;
+
+	return false;
+}
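/*
 * Worked examples for _resource_changed(current, max), using the vote
 * levels from sde_power_handle.h:
 *
 *   (VOTE_INDEX_DISABLE, VOTE_INDEX_LOW)     -> true   power up
 *   (VOTE_INDEX_LOW,     VOTE_INDEX_DISABLE) -> true   power down
 *   (VOTE_INDEX_LOW,     VOTE_INDEX_HIGH)    -> false  scale only
 *
 * Moving between LOW/MEDIUM/HIGH is deliberately not a "change" here;
 * such scaling goes through sde_power_scale_reg_bus() instead.
 */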
+
 int sde_power_resource_enable(struct sde_power_handle *phandle,
 	struct sde_power_client *pclient, bool enable)
 {
@@ -872,7 +947,15 @@
 			max_usecase_ndx = client->usecase_ndx;
 	}
 
-	if (phandle->current_usecase_ndx != max_usecase_ndx) {
+	/*
+	 * Check whether the power resource itself needs to be enabled or
+	 * disabled; this function does not scale the AHB vote up or down.
+	 * A client that wants to bump the AHB clock above the LOW (default)
+	 * level must call 'sde_power_scale_reg_bus' with the desired vote
+	 * after the power resource has been enabled.
+	 */
+	if (_resource_changed(phandle->current_usecase_ndx,
+			max_usecase_ndx)) {
 		changed = true;
 		prev_usecase_ndx = phandle->current_usecase_ndx;
 		phandle->current_usecase_ndx = max_usecase_ndx;
@@ -885,6 +968,8 @@
 	if (!changed)
 		goto end;
 
+	SDE_ATRACE_BEGIN("sde_power_resource_enable");
+
 	/* RSC client init */
 	sde_power_rsc_client_init(phandle);
 
@@ -908,8 +993,8 @@
 			goto vreg_err;
 		}
 
-		rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
-							max_usecase_ndx);
+		rc = sde_power_scale_reg_bus(phandle, pclient,
+				max_usecase_ndx, true);
 		if (rc) {
 			pr_err("failed to set reg bus vote rc=%d\n", rc);
 			goto reg_bus_hdl_err;
@@ -940,8 +1025,8 @@
 
 		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
 
-		sde_power_reg_bus_update(phandle->reg_bus_hdl,
-							max_usecase_ndx);
+		sde_power_scale_reg_bus(phandle, pclient,
+				max_usecase_ndx, true);
 
 		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
 
@@ -956,13 +1041,13 @@
 end:
 	SDE_EVT32_VERBOSE(enable, SDE_EVTLOG_FUNC_EXIT);
 	mutex_unlock(&phandle->phandle_lock);
-
+	SDE_ATRACE_END("sde_power_resource_enable");
 	return rc;
 
 clk_err:
 	sde_power_rsc_update(phandle, false);
 rsc_err:
-	sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
+	sde_power_scale_reg_bus(phandle, pclient, max_usecase_ndx, true);
 reg_bus_hdl_err:
 	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
 vreg_err:
@@ -971,6 +1056,7 @@
 data_bus_hdl_err:
 	phandle->current_usecase_ndx = prev_usecase_ndx;
 	mutex_unlock(&phandle->phandle_lock);
+	SDE_ATRACE_END("sde_power_resource_enable");
 	return rc;
 }
 
@@ -984,11 +1070,47 @@
 	return phandle->current_usecase_ndx != VOTE_INDEX_DISABLE;
 }
 
+int sde_cx_ipeak_vote(struct sde_power_handle *phandle, struct dss_clk *clock,
+		u64 requested_clk_rate, u64 prev_clk_rate, bool enable_vote)
+{
+	int ret = 0;
+	u64 curr_core_clk_rate, max_core_clk_rate, prev_core_clk_rate;
+
+	if (!phandle->dss_cx_ipeak) {
+		pr_debug("%pS->%s: Invalid input\n",
+				__builtin_return_address(0), __func__);
+		return -EOPNOTSUPP;
+	}
+
+	if (strcmp("core_clk", clock->clk_name)) {
+		pr_debug("Not a core clk, cx_ipeak vote not needed\n");
+		return -EOPNOTSUPP;
+	}
+
+	curr_core_clk_rate = clock->rate;
+	max_core_clk_rate = clock->max_rate;
+	prev_core_clk_rate = prev_clk_rate;
+
+	if (enable_vote && requested_clk_rate == max_core_clk_rate &&
+				curr_core_clk_rate != requested_clk_rate)
+		ret = cx_ipeak_update(phandle->dss_cx_ipeak, true);
+	else if (!enable_vote && requested_clk_rate != max_core_clk_rate &&
+				prev_core_clk_rate == max_core_clk_rate)
+		ret = cx_ipeak_update(phandle->dss_cx_ipeak, false);
+
+	if (ret)
+		SDE_EVT32(ret, enable_vote, requested_clk_rate,
+					curr_core_clk_rate, prev_core_clk_rate);
+
+	return ret;
+}
+
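/*
 * Example with a hypothetical core_clk max_rate of 460 MHz:
 *
 *   300 MHz -> 460 MHz: the enable path fires before the rate change,
 *                       cx_ipeak_update(..., true)
 *   460 MHz -> 300 MHz: the disable path fires after the rate change,
 *                       cx_ipeak_update(..., false)
 *   300 MHz -> 345 MHz: neither condition holds, no cx_ipeak call
 *
 * The peak-current vote therefore tracks only transitions to and from
 * the maximum core clock rate.
 */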
 int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
 	u64 rate)
 {
 	int i, rc = -EINVAL;
 	struct dss_module_power *mp;
+	u64 prev_clk_rate, requested_clk_rate;
 
 	if (!phandle) {
 		pr_err("invalid input power handle\n");
@@ -1002,8 +1124,15 @@
 					(rate > mp->clk_config[i].max_rate))
 				rate = mp->clk_config[i].max_rate;
 
+			prev_clk_rate = mp->clk_config[i].rate;
+			requested_clk_rate = rate;
+			sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
+				requested_clk_rate, prev_clk_rate, true);
 			mp->clk_config[i].rate = rate;
 			rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+			if (!rc)
+				sde_cx_ipeak_vote(phandle, &mp->clk_config[i],
+				   requested_clk_rate, prev_clk_rate, false);
 			break;
 		}
 	}
@@ -1079,6 +1208,30 @@
 	return clk;
 }
 
+int sde_power_clk_set_flags(struct sde_power_handle *phandle,
+		char *clock_name, unsigned long flags)
+{
+	struct clk *clk;
+
+	if (!phandle) {
+		pr_err("invalid input power handle\n");
+		return -EINVAL;
+	}
+
+	if (!clock_name) {
+		pr_err("invalid input clock name\n");
+		return -EINVAL;
+	}
+
+	clk = sde_power_clk_get_clk(phandle, clock_name);
+	if (!clk) {
+		pr_err("get_clk failed for clk: %s\n", clock_name);
+		return -EINVAL;
+	}
+
+	return clk_set_flags(clk, flags);
+}
+
 struct sde_power_event *sde_power_handle_register_event(
 		struct sde_power_handle *phandle,
 		u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index 29ed892..a861092 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -18,6 +18,7 @@
 #define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA	1800000000
 
 #include <linux/sde_io_util.h>
+#include <soc/qcom/cx_ipeak.h>
 
 /* event will be triggered before power handler disable */
 #define SDE_POWER_EVENT_PRE_DISABLE	0x1
@@ -35,11 +36,15 @@
  * mdss_bus_vote_type: register bus vote type
  * VOTE_INDEX_DISABLE: removes the client vote
  * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MEDIUM: keeps medium vote for register bus
+ * VOTE_INDEX_HIGH: keeps the highest vote for register bus
  * VOTE_INDEX_MAX: invalid
  */
 enum mdss_bus_vote_type {
 	VOTE_INDEX_DISABLE,
 	VOTE_INDEX_LOW,
+	VOTE_INDEX_MEDIUM,
+	VOTE_INDEX_HIGH,
 	VOTE_INDEX_MAX,
 };
 
@@ -153,6 +158,7 @@
  * @event_list: current power handle event list
  * @rsc_client: sde rsc client pointer
  * @rsc_client_init: boolean to control rsc client create
+ * @dss_cx_ipeak: client pointer for cx ipeak driver
  */
 struct sde_power_handle {
 	struct dss_module_power mp;
@@ -166,6 +172,7 @@
 	struct list_head event_list;
 	struct sde_rsc_client *rsc_client;
 	bool rsc_client_init;
+	struct cx_ipeak_client *dss_cx_ipeak;
 };
 
 /**
@@ -220,6 +227,19 @@
 	struct sde_power_client *pclient, bool enable);
 
 /**
+ * sde_power_scale_reg_bus() - Scale the register bus for the specified client
+ * @phandle:  power handle containing the resources
+ * @pclient: client whose vote will be scaled
+ * @usecase_ndx: new use case to scale the reg bus
+ * @skip_lock: skip taking the power resource mutex during the call; meant
+ *		for internal callers that already hold the required lock.
+ *
+ * Return: error code.
+ */
+int sde_power_scale_reg_bus(struct sde_power_handle *phandle,
+	struct sde_power_client *pclient, u32 usecase_ndx, bool skip_lock);
+
+/**
  * sde_power_resource_is_enabled() - return true if power resource is enabled
  * @pdata:  power handle containing the resources
  *
@@ -278,6 +298,17 @@
 		char *clock_name);
 
 /**
+ * sde_power_clk_set_flags() - set the clock flags
+ * @pdata:  power handle containing the resources
+ * @clock_name: clock name to get the clk pointer.
+ * @flags: flags to set
+ *
+ * Return: error code.
+ */
+int sde_power_clk_set_flags(struct sde_power_handle *pdata,
+		char *clock_name, unsigned long flags);
+
+/**
  * sde_power_data_bus_set_quota() - set data bus quota for power client
  * @phandle:  power handle containing the resources
  * @client: client information to set quota
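
A sketch of the intended external calling sequence for the new scaling API, with error handling trimmed; note a client must already hold an enable refcount before scaling:

	rc = sde_power_resource_enable(phandle, pclient, true);
	if (!rc)
		rc = sde_power_scale_reg_bus(phandle, pclient,
					     VOTE_INDEX_HIGH, false);

	/* register-intensive work */

	sde_power_scale_reg_bus(phandle, pclient, VOTE_INDEX_LOW, false);
	sde_power_resource_enable(phandle, pclient, false);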
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 84eeefd..2a7721c 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -25,20 +25,11 @@
 #define SDE_RSC_DRV_DBG_NAME		"sde_rsc_drv"
 #define SDE_RSC_WRAPPER_DBG_NAME	"sde_rsc_wrapper"
 
-/* worst case time to execute the one tcs vote(sleep/wake) - ~1ms */
-#define SINGLE_TCS_EXECUTION_TIME				1064000
+#define SINGLE_TCS_EXECUTION_TIME_V1	1064000
+#define SINGLE_TCS_EXECUTION_TIME_V2	850000
 
-/* this time is ~1ms - only wake tcs in any mode */
-#define RSC_BACKOFF_TIME_NS		 (SINGLE_TCS_EXECUTION_TIME + 100)
-
-/**
- * this time is ~1ms - only wake TCS in mode-0.
- * This time must be greater than backoff time.
- */
-#define RSC_MODE_THRESHOLD_TIME_IN_NS	(RSC_BACKOFF_TIME_NS + 2700)
-
-/* this time is ~2ms - sleep+ wake TCS in mode-1 */
-#define RSC_TIME_SLOT_0_NS		((SINGLE_TCS_EXECUTION_TIME * 2) + 100)
+#define RSC_MODE_INSTRUCTION_TIME	100
+#define RSC_MODE_THRESHOLD_OVERHEAD	2700
 
 #define DEFAULT_PANEL_FPS		60
 #define DEFAULT_PANEL_JITTER_NUMERATOR	2
@@ -330,9 +321,9 @@
 	struct sde_rsc_cmd_config *cmd_config)
 {
 	const u32 cxo_period_ns = 52;
-	u64 rsc_backoff_time_ns = RSC_BACKOFF_TIME_NS;
-	u64 rsc_mode_threshold_time_ns = RSC_MODE_THRESHOLD_TIME_IN_NS;
-	u64 rsc_time_slot_0_ns = RSC_TIME_SLOT_0_NS;
+	u64 rsc_backoff_time_ns = rsc->backoff_time_ns;
+	u64 rsc_mode_threshold_time_ns = rsc->mode_threshold_time_ns;
+	u64 rsc_time_slot_0_ns = rsc->time_slot_0_ns;
 	u64 rsc_time_slot_1_ns;
 	const u64 pdc_jitter = 20; /* 20% more */
 
@@ -587,8 +578,12 @@
 			msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
 		if (!rc) {
 			pr_err("Timeout waiting for vsync\n");
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
+			rc = -ETIMEDOUT;
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc,
 				SDE_EVTLOG_ERROR);
+		} else {
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc);
+			rc = 0;
 		}
 	}
 end:
@@ -643,8 +638,12 @@
 			msecs_to_jiffies(PRIMARY_VBLANK_WORST_CASE_MS*2));
 		if (!rc) {
 			pr_err("Timeout waiting for vsync\n");
-			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait),
+			rc = -ETIMEDOUT;
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc,
 				SDE_EVTLOG_ERROR);
+		} else {
+			SDE_EVT32(atomic_read(&rsc->rsc_vsync_wait), rc);
+			rc = 0;
 		}
 	}
 
@@ -967,9 +966,6 @@
 	rsc = s->private;
 
 	mutex_lock(&rsc->client_lock);
-	ret = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-	if (ret)
-		goto end;
 
 	seq_printf(s, "rsc current state:%d\n", rsc->current_state);
 	seq_printf(s, "wrapper backoff time(ns):%d\n",
@@ -995,12 +991,17 @@
 		seq_printf(s, "\t client:%s state:%d\n",
 				client->name, client->current_state);
 
+	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
+		pr_debug("debug node is not supported during idle state\n");
+		seq_puts(s, "hw state is not supported during idle pc\n");
+		goto end;
+	}
+
 	if (rsc->hw_ops.debug_show) {
 		ret = rsc->hw_ops.debug_show(s, rsc);
 		if (ret)
 			pr_err("sde rsc: hw debug failed ret:%d\n", ret);
 	}
-	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 
 end:
 	mutex_unlock(&rsc->client_lock);
@@ -1024,21 +1025,22 @@
 {
 	struct sde_rsc_priv *rsc = file->private_data;
 	char buffer[MAX_BUFFER_SIZE];
-	int blen = 0, rc;
+	int blen = 0;
 
 	if (*ppos || !rsc || !rsc->hw_ops.mode_ctrl)
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-	if (rc)
+	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
+		pr_debug("debug node is not supported during idle state\n");
+		blen = snprintf(buffer, MAX_BUFFER_SIZE,
+				"hw state is not supported during idle pc\n");
 		goto end;
+	}
 
 	blen = rsc->hw_ops.mode_ctrl(rsc, MODE_READ, buffer,
 							MAX_BUFFER_SIZE, 0);
 
-	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-
 end:
 	mutex_unlock(&rsc->client_lock);
 	if (blen <= 0)
@@ -1090,14 +1092,14 @@
 	}
 
 	mutex_lock(&rsc->client_lock);
-	rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-	if (rc)
-		goto clk_enable_fail;
+	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
+		pr_debug("debug node is not supported during idle state\n");
+		goto state_check;
+	}
 
 	rsc->hw_ops.mode_ctrl(rsc, MODE_UPDATE, NULL, 0, mode_state);
-	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 
-clk_enable_fail:
+state_check:
 	mutex_unlock(&rsc->client_lock);
 end:
 	kfree(input);
@@ -1116,21 +1118,22 @@
 {
 	struct sde_rsc_priv *rsc = file->private_data;
 	char buffer[MAX_BUFFER_SIZE];
-	int blen = 0, rc;
+	int blen = 0;
 
 	if (*ppos || !rsc || !rsc->hw_ops.hw_vsync)
 		return 0;
 
 	mutex_lock(&rsc->client_lock);
-	rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-	if (rc)
+	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
+		pr_debug("debug node is not supported during idle state\n");
+		blen = snprintf(buffer, MAX_BUFFER_SIZE,
+				"hw state is not supported during idle pc\n");
 		goto end;
+	}
 
 	blen = rsc->hw_ops.hw_vsync(rsc, VSYNC_READ, buffer,
 						MAX_BUFFER_SIZE, 0);
 
-	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-
 end:
 	mutex_unlock(&rsc->client_lock);
 	if (blen <= 0)
@@ -1175,9 +1178,10 @@
 	vsync_state &= 0x7;
 
 	mutex_lock(&rsc->client_lock);
-	rc = sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, true);
-	if (rc)
-		goto clk_en_fail;
+	if (rsc->current_state == SDE_RSC_IDLE_STATE) {
+		pr_debug("debug node is not supported during idle state\n");
+		goto state_check;
+	}
 
 	if (vsync_state)
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_ENABLE, NULL,
@@ -1185,9 +1189,7 @@
 	else
 		rsc->hw_ops.hw_vsync(rsc, VSYNC_DISABLE, NULL, 0, 0);
 
-	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
-
-clk_en_fail:
+state_check:
 	mutex_unlock(&rsc->client_lock);
 end:
 	kfree(input);
@@ -1346,6 +1348,20 @@
 	of_property_read_u32(pdev->dev.of_node, "qcom,sde-rsc-version",
 								&rsc->version);
 
+	if (rsc->version == SDE_RSC_REV_2)
+		rsc->single_tcs_execution_time = SINGLE_TCS_EXECUTION_TIME_V2;
+	else
+		rsc->single_tcs_execution_time = SINGLE_TCS_EXECUTION_TIME_V1;
+
+	rsc->backoff_time_ns = rsc->single_tcs_execution_time
+					+ RSC_MODE_INSTRUCTION_TIME;
+
+	rsc->mode_threshold_time_ns = rsc->backoff_time_ns
+					+ RSC_MODE_THRESHOLD_OVERHEAD;
+
+	rsc->time_slot_0_ns = (rsc->single_tcs_execution_time * 2)
+					+ RSC_MODE_INSTRUCTION_TIME;
+
 	ret = sde_power_resource_init(pdev, &rsc->phandle);
 	if (ret) {
 		pr_err("sde rsc:power resource init failed ret:%d\n", ret);
@@ -1464,6 +1480,7 @@
 	.driver     = {
 		.name   = "sde_rsc",
 		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
 	},
 };
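
For reference, the probe-time formulas above expand to the following per-revision values (all in ns):

	                        v1 (REV_1)    v2 (REV_2)
	single_tcs_execution    1064000       850000
	backoff_time_ns         1064100       850100
	mode_threshold_time_ns  1066800       852800
	time_slot_0_ns          2128100       1700100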
 
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index 2dacfab..994bdab 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -95,9 +95,6 @@
 #define SDE_RSC_MODE_1_VAL		1
 #define MAX_MODE2_ENTRY_TRY		3
 
-#define SDE_RSC_REV_1			0x1
-#define SDE_RSC_REV_2			0x2
-
 static void rsc_event_trigger(struct sde_rsc_priv *rsc, uint32_t event_type)
 {
 	struct sde_rsc_event *event;
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
index 5a916e4..c63ce8e 100644
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -24,6 +24,9 @@
 
 #define MAX_COUNT_SIZE_SUPPORTED	128
 
+#define SDE_RSC_REV_1			0x1
+#define SDE_RSC_REV_2			0x2
+
 struct sde_rsc_priv;
 
 /**
@@ -140,6 +143,12 @@
  *			invalid state. It can be blocked by this boolean entry.
  * primary_client:	A client which is allowed to make command state request
  *			and ab/ib vote on display rsc
+ * single_tcs_execution_time: worst case time to execute one tcs vote
+ *			(sleep/wake)
+ * backoff_time_ns:	time to trigger only the wake TCS, in any mode
+ * mode_threshold_time_ns: time to wake the TCS in mode-0; must be greater
+ *			than the backoff time
+ * time_slot_0_ns:	time for the sleep and wake TCS in mode-1
  * master_drm:		Primary client waits for vsync on this drm object based
  *			on crtc id
  * rsc_vsync_wait:   Refcount to indicate if we have to wait for the vsync.
@@ -172,6 +181,11 @@
 	bool power_collapse_block;
 	struct sde_rsc_client *primary_client;
 
+	u32 single_tcs_execution_time;
+	u32 backoff_time_ns;
+	u32 mode_threshold_time_ns;
+	u32 time_slot_0_ns;
+
 	struct drm_device *master_drm;
 	atomic_t rsc_vsync_wait;
 	wait_queue_head_t rsc_vsync_waitq;
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index 306119e..c424e52 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, 2018 The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight Embedded Trace Buffer driver
  */
@@ -674,7 +674,6 @@
 	spin_lock_init(&drvdata->spinlock);
 
 	drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
-	pm_runtime_put(&adev->dev);
 
 	if (drvdata->buffer_depth & 0x80000000)
 		return -EINVAL;
@@ -700,6 +699,7 @@
 	ret = misc_register(&drvdata->miscdev);
 	if (ret)
 		goto err_misc_register;
+	pm_runtime_put(&adev->dev);
 
 	return 0;
 
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 8954836..ecc2259 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight System Trace Macrocell driver
  *
@@ -293,7 +293,7 @@
 	if (!drvdata || !drvdata->csdev)
 		return;
 	/* If any OST entity is enabled do not disable the device */
-	if (drvdata->entities == NULL)
+	if (!bitmap_empty(drvdata->entities, OST_ENTITY_MAX))
 		return;
 	coresight_disable(drvdata->csdev);
 }
@@ -866,11 +866,6 @@
 				 BYTES_PER_CHANNEL), resource_size(res));
 	}
 	bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
-	/* Store the driver data pointer for use in exported functions */
-	ret = stm_set_ost_params(drvdata, bitmap_size);
-	if (ret)
-		return ret;
-
 
 	guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
 	if (!guaranteed)
@@ -900,6 +895,11 @@
 		goto stm_unregister;
 	}
 
+	/* Store the driver data pointer for use in exported functions */
+	ret = stm_set_ost_params(drvdata, bitmap_size);
+	if (ret)
+		goto stm_unregister;
+
 	pm_runtime_put(&adev->dev);
 
 	dev_info(dev, "%s initialized with master %s\n", (char *)id->data,
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 34c4c74..72cbab2 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -180,7 +180,7 @@
 		kfree(buf);
 
 	if (!ret) {
-		coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
+		coresight_cti_map_trigin(drvdata->cti_reset, 0, 0);
 		coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
 		dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
 	}
@@ -262,7 +262,7 @@
 
 	spin_unlock_irqrestore(&drvdata->spinlock, flags);
 
-	coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
+	coresight_cti_unmap_trigin(drvdata->cti_reset, 0, 0);
 	coresight_cti_unmap_trigout(drvdata->cti_flush, 1, 0);
 
 	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index d623cb6..b626f34 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1147,6 +1147,9 @@
 		tmc_etr_byte_cntr_stop(drvdata->byte_cntr);
 		coresight_cti_unmap_trigin(drvdata->cti_reset, 2, 0);
 		coresight_cti_unmap_trigout(drvdata->cti_flush, 3, 0);
+		/* Free memory outside the spinlock if need be */
+		if (drvdata->etr_buf)
+			tmc_etr_free_sysfs_buf(drvdata->etr_buf);
 	}
 	mutex_unlock(&drvdata->mem_lock);
 	dev_info(drvdata->dev, "TMC-ETR disabled\n");
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 26250d6..344e1fd 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -667,8 +667,6 @@
 		drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
 	}
 
-	pm_runtime_put(&adev->dev);
-
 	ret = of_get_coresight_csr_name(adev->dev.of_node, &drvdata->csr_name);
 	if (ret)
 		dev_err(dev, "No csr data\n");
@@ -685,12 +683,16 @@
 		dev_err(dev, "invalid cti data\n");
 	} else if (ctidata && ctidata->nr_ctis == 2) {
 		drvdata->cti_flush = coresight_cti_get(ctidata->names[0]);
-		if (IS_ERR(drvdata->cti_flush))
-			dev_err(dev, "failed to get flush cti\n");
+		if (IS_ERR(drvdata->cti_flush)) {
+			dev_err(dev, "failed to get flush cti, defer probe\n");
+			return -EPROBE_DEFER;
+		}
 
 		drvdata->cti_reset = coresight_cti_get(ctidata->names[1]);
-		if (IS_ERR(drvdata->cti_reset))
-			dev_err(dev, "failed to get reset cti\n");
+		if (IS_ERR(drvdata->cti_reset)) {
+			dev_err(dev, "failed to get reset cti, defer probe\n");
+			return -EPROBE_DEFER;
+		}
 	}
 
 	desc.pdata = pdata;
@@ -727,6 +729,10 @@
 		coresight_unregister(drvdata->csdev);
 		goto out_iommu_deinit;
 	}
+
+	if (!ret)
+		pm_runtime_put(&adev->dev);
+
 	return ret;
 
 out_iommu_deinit:
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index d807b8c..9b3314d 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -646,8 +646,7 @@
 
 static int tpda_parse_of_data(struct tpda_drvdata *drvdata)
 {
-	int len, port, i, ret;
-	const __be32 *prop;
+	int ret;
 	struct device_node *node = drvdata->dev->of_node;
 
 	ret = of_property_read_u32(node, "qcom,tpda-atid", &drvdata->atid);
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index d04ce1f..f603e54 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -230,6 +230,7 @@
 	uint32_t		trig_patt_val[TPDM_CMB_PATT_CMP];
 	uint32_t		trig_patt_mask[TPDM_CMB_PATT_CMP];
 	bool			trig_ts;
+	bool			ts_all;
 	uint32_t		msr[TPDM_CMB_MAX_MSR];
 	uint8_t			read_ctl_reg;
 	struct mcmb_dataset	*mcmb;
@@ -627,6 +628,10 @@
 		val = val | BIT(1);
 	else
 		val = val & ~BIT(1);
+	if (drvdata->cmb->ts_all)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
 	tpdm_writel(drvdata, val, TPDM_CMB_TIER);
 
 	__tpdm_config_cmb_msr(drvdata);
@@ -3564,6 +3569,44 @@
 }
 static DEVICE_ATTR_RW(cmb_patt_ts);
 
+static ssize_t cmb_ts_all_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->cmb->ts_all);
+}
+
+static ssize_t cmb_ts_all_store(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->cmb->ts_all = true;
+	else
+		drvdata->cmb->ts_all = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR_RW(cmb_ts_all);
+
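/*
 * Hypothetical userspace toggle for the new attribute; the exact sysfs
 * path depends on the platform's TPDM device name, and the store routine
 * above parses its input with base 16:
 *
 *	int fd = open("/sys/bus/coresight/devices/<tpdm>/cmb_ts_all",
 *		      O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	(sets TIER bit 2 at next enable)
 *		close(fd);
 *	}
 */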
 static ssize_t cmb_trig_patt_val_lsb_show(struct device *dev,
 					       struct device_attribute *attr,
 					       char *buf)
@@ -4049,6 +4092,7 @@
 	&dev_attr_cmb_patt_val.attr,
 	&dev_attr_cmb_patt_mask.attr,
 	&dev_attr_cmb_patt_ts.attr,
+	&dev_attr_cmb_ts_all.attr,
 	&dev_attr_cmb_trig_patt_val_lsb.attr,
 	&dev_attr_cmb_trig_patt_mask_lsb.attr,
 	&dev_attr_cmb_trig_patt_val_msb.attr,
@@ -4203,8 +4247,8 @@
 			if (ret)
 				return ret;
 
-			drvdata->treg[i] = devm_regulator_get(drvdata,
-					    treg_name);
+			drvdata->treg[i] = devm_regulator_get(drvdata->dev,
+							treg_name);
 			if (IS_ERR(drvdata->treg[i]))
 				return PTR_ERR(drvdata->treg[i]);
 		}
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index e8ac7e7..7667707 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -91,6 +91,23 @@
 				 csdev, coresight_id_match);
 }
 
+static int coresight_reset_sink(struct device *dev, void *data)
+{
+	struct coresight_device *csdev = to_coresight_device(dev);
+
+	if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+	     csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+	     csdev->activated)
+		csdev->activated = false;
+
+	return 0;
+}
+
+static void coresight_reset_all_sink(void)
+{
+	bus_for_each_dev(&coresight_bustype, NULL, NULL, coresight_reset_sink);
+}
+
 static int coresight_find_link_inport(struct coresight_device *csdev,
 				      struct coresight_device *parent)
 {
@@ -1017,6 +1034,9 @@
 		__coresight_disable(csdev);
 	}
 
+	/* Reset all activated sinks */
+	coresight_reset_all_sink();
+
 	mutex_unlock(&coresight_mutex);
 	return size;
 }
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 7482de5..f4a3fe2 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -60,7 +60,7 @@
 
 config IOMMU_IO_PGTABLE_FAST
 	bool "Fast ARMv7/v8 Long Descriptor Format"
-	depends on ARM64_DMA_USE_IOMMU || ARM_DMA_USE_IOMMU
+	depends on (ARM64_DMA_USE_IOMMU || ARM_DMA_USE_IOMMU) && IOMMU_DMA
 	help
           Enable support for a subset of the ARM long descriptor pagetable
 	  format.  This allocator achieves fast performance by
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 82f9bc5..40bb47a 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -60,6 +60,7 @@
 
 #include <soc/qcom/msm_tz_smmu.h>
 #include <soc/qcom/scm.h>
+#include <asm/dma-iommu.h>
 #include "io-pgtable.h"
 #include "arm-smmu-regs.h"
 
@@ -130,7 +131,6 @@
 	GENERIC_SMMU,
 	ARM_MMU500,
 	CAVIUM_SMMUV2,
-	QCOM_SMMUV2,
 	QCOM_SMMUV500,
 };
 
@@ -2055,7 +2055,7 @@
 	 * Free the domain resources. We assume that all devices have
 	 * already been detached.
 	 */
-	iommu_put_dma_cookie(domain);
+	arm_iommu_put_dma_cookie(domain);
 	arm_smmu_destroy_domain_context(domain);
 	kfree(smmu_domain);
 }
@@ -3152,7 +3152,7 @@
 			ret = -ENODEV;
 			break;
 		}
-		info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
+		info->ops = smmu_domain->pgtbl_ops;
 		ret = 0;
 		break;
 	}
@@ -3628,166 +3628,6 @@
 	.iova_to_pte = arm_smmu_iova_to_pte,
 };
 
-#define IMPL_DEF1_MICRO_MMU_CTRL	0
-#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
-#define MICRO_MMU_CTRL_IDLE		(1 << 3)
-
-/* Definitions for implementation-defined registers */
-#define ACTLR_QCOM_OSH_SHIFT		28
-#define ACTLR_QCOM_OSH			1
-
-#define ACTLR_QCOM_ISH_SHIFT		29
-#define ACTLR_QCOM_ISH			1
-
-#define ACTLR_QCOM_NSH_SHIFT		30
-#define ACTLR_QCOM_NSH			1
-
-static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
-{
-	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
-	u32 tmp;
-
-	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
-					tmp, (tmp & MICRO_MMU_CTRL_IDLE),
-					0, 30000)) {
-		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
-{
-	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
-	u32 reg;
-
-	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
-	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
-
-	if (arm_smmu_is_static_cb(smmu)) {
-		phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
-							smmu->phys_addr;
-
-		if (scm_io_write(impl_def1_base_phys +
-					IMPL_DEF1_MICRO_MMU_CTRL, reg)) {
-			dev_err(smmu->dev,
-				"scm_io_write fail. SMMU might not be halted");
-			return -EINVAL;
-		}
-	} else {
-		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
-	}
-
-	return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
-}
-
-static int qsmmuv2_halt(struct arm_smmu_device *smmu)
-{
-	return __qsmmuv2_halt(smmu, true);
-}
-
-static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
-{
-	return __qsmmuv2_halt(smmu, false);
-}
-
-static void qsmmuv2_resume(struct arm_smmu_device *smmu)
-{
-	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
-	u32 reg;
-
-	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
-	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
-
-	if (arm_smmu_is_static_cb(smmu)) {
-		phys_addr_t impl_def1_base_phys = impl_def1_base - smmu->base +
-							smmu->phys_addr;
-
-		if (scm_io_write(impl_def1_base_phys +
-				IMPL_DEF1_MICRO_MMU_CTRL, reg))
-			dev_err(smmu->dev,
-				"scm_io_write fail. SMMU might not be resumed");
-	} else {
-		writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
-	}
-}
-
-static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
-{
-	int i;
-	u32 val;
-	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
-	void __iomem *cb_base;
-
-	/*
-	 * SCTLR.M must be disabled here per ARM SMMUv2 spec
-	 * to prevent table walks with an inconsistent state.
-	 */
-	for (i = 0; i < smmu->num_context_banks; ++i) {
-		cb_base = ARM_SMMU_CB(smmu, i);
-		val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
-		ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
-		ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
-		writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
-	}
-
-	/* Program implementation defined registers */
-	qsmmuv2_halt(smmu);
-	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
-		writel_relaxed(regs[i].value,
-			ARM_SMMU_GR0(smmu) + regs[i].offset);
-	qsmmuv2_resume(smmu);
-}
-
-static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
-				dma_addr_t iova)
-{
-	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	int ret;
-	phys_addr_t phys = 0;
-	unsigned long flags;
-	u32 sctlr, sctlr_orig, fsr;
-	void __iomem *cb_base;
-
-	ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
-	if (ret)
-		return ret;
-
-	spin_lock_irqsave(&smmu->atos_lock, flags);
-	cb_base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
-
-	qsmmuv2_halt_nowait(smmu);
-	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
-	qsmmuv2_wait_for_halt(smmu);
-
-	/* clear FSR to allow ATOS to log any faults */
-	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
-	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
-
-	/* disable stall mode momentarily */
-	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
-	sctlr = sctlr_orig & ~SCTLR_CFCFG;
-	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
-
-	phys = __arm_smmu_iova_to_phys_hard(domain, iova);
-
-	/* restore SCTLR */
-	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
-
-	qsmmuv2_resume(smmu);
-	spin_unlock_irqrestore(&smmu->atos_lock, flags);
-
-	arm_smmu_power_off(smmu_domain->smmu->pwr);
-	return phys;
-}
-
-struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
-	.device_reset = qsmmuv2_device_reset,
-	.iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
-};
-
 static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
 {
 	int i;
@@ -4445,7 +4285,6 @@
 ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
 ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
-ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
 ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
 		    &qsmmuv500_arch_ops);
 
@@ -4456,7 +4295,6 @@
 	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
 	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
 	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
-	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
 	{ .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
 	{ },
 };
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f92d411..804a226 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -267,6 +267,10 @@
 	int vmid = VMID_HLOS;
 	int min_iova_align = 0;
 
+	/* Called for each device in the iommu group */
+	if (cookie->guard_page)
+		return 0;
+
 	iommu_domain_get_attr(domain,
 			DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN,
 			&min_iova_align);
@@ -725,7 +729,7 @@
  * avoid individually crossing any boundaries, so we merely need to check a
  * segment's start address to avoid concatenating across one.
  */
-static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
+int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
 		dma_addr_t dma_addr)
 {
 	struct scatterlist *s, *cur = sg;
@@ -778,7 +782,7 @@
  * If mapping failed, then just restore the original list,
  * but making sure the DMA fields are invalidated.
  */
-static void __invalidate_sg(struct scatterlist *sg, int nents)
+void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents)
 {
 	struct scatterlist *s;
 	int i;
@@ -800,14 +804,10 @@
  * impedance-matching, to be able to hand off a suitably-aligned list,
  * but still preserve the original offsets and sizes for the caller.
  */
-int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, int prot)
+size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad,
+				struct scatterlist *sg, int nents)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct iommu_dma_cookie *cookie = domain->iova_cookie;
-	struct iova_domain *iovad = &cookie->iovad;
 	struct scatterlist *s, *prev = NULL;
-	dma_addr_t iova;
 	size_t iova_len = 0;
 	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
@@ -851,6 +851,26 @@
 		prev = s;
 	}
 
+	return iova_len;
+}
+
+int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, int prot)
+{
+	struct iommu_domain *domain;
+	struct iommu_dma_cookie *cookie;
+	struct iova_domain *iovad;
+	dma_addr_t iova;
+	size_t iova_len;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain)
+		return 0;
+	cookie = domain->iova_cookie;
+	iovad = &cookie->iovad;
+
+	iova_len = iommu_dma_prepare_map_sg(dev, iovad, sg, nents);
+
 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
 	if (!iova)
 		goto out_restore_sg;
@@ -862,12 +882,12 @@
 	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
 		goto out_free_iova;
 
-	return __finalise_sg(dev, sg, nents, iova);
+	return iommu_dma_finalise_sg(dev, sg, nents, iova);
 
 out_free_iova:
 	iommu_dma_free_iova(domain, cookie, iova, iova_len);
 out_restore_sg:
-	__invalidate_sg(sg, nents);
+	iommu_dma_invalidate_sg(sg, nents);
 	return 0;
 }
 
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 0069a05..10ab30e 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -13,7 +13,10 @@
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/pci.h>
+#include <linux/dma-iommu.h>
+#include <linux/iova.h>
 #include <trace/events/iommu.h>
+#include "io-pgtable.h"
 
 #include <soc/qcom/secure_buffer.h>
 #include <linux/arm-smmu-errata.h>
@@ -22,14 +25,6 @@
 #define FAST_PAGE_SHIFT		12
 #define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT)
 #define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
-#define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
-#define FAST_MAIR_ATTR_IDX_CACHE	1
-#define FAST_PTE_ATTRINDX_SHIFT		2
-#define FAST_PTE_ATTRINDX_MASK		0x7
-#define FAST_PTE_SH_SHIFT		8
-#define FAST_PTE_SH_MASK	   (((av8l_fast_iopte)0x3) << FAST_PTE_SH_SHIFT)
-#define FAST_PTE_SH_OS             (((av8l_fast_iopte)2) << FAST_PTE_SH_SHIFT)
-#define FAST_PTE_SH_IS             (((av8l_fast_iopte)3) << FAST_PTE_SH_SHIFT)
 
 static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
 				 bool coherent)
@@ -41,40 +36,6 @@
 	return prot;
 }
 
-static int __get_iommu_pgprot(unsigned long attrs, int prot,
-			      bool coherent)
-{
-	if (!(attrs & DMA_ATTR_EXEC_MAPPING))
-		prot |= IOMMU_NOEXEC;
-	if ((attrs & DMA_ATTR_STRONGLY_ORDERED))
-		prot |= IOMMU_MMIO;
-	if (coherent)
-		prot |= IOMMU_CACHE;
-
-	return prot;
-}
-
-static void fast_dmac_clean_range(struct dma_fast_smmu_mapping *mapping,
-				  void *start, void *end)
-{
-	if (!mapping->is_smmu_pt_coherent)
-		dmac_clean_range(start, end);
-}
-
-static bool __fast_is_pte_coherent(av8l_fast_iopte *ptep)
-{
-	int attr_idx = (*ptep & (FAST_PTE_ATTRINDX_MASK <<
-			FAST_PTE_ATTRINDX_SHIFT)) >>
-			FAST_PTE_ATTRINDX_SHIFT;
-
-	if ((attr_idx == FAST_MAIR_ATTR_IDX_CACHE) &&
-		(((*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_IS) ||
-		  (*ptep & FAST_PTE_SH_MASK) == FAST_PTE_SH_OS))
-		return true;
-
-	return false;
-}
-
 static bool is_dma_coherent(struct device *dev, unsigned long attrs)
 {
 	bool is_coherent;
@@ -91,6 +52,16 @@
 	return is_coherent;
 }
 
+static struct dma_fast_smmu_mapping *dev_get_mapping(struct device *dev)
+{
+	struct iommu_domain *domain;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain)
+		return ERR_PTR(-EINVAL);
+	return domain->iova_cookie;
+}
+
 /*
  * Checks if the allocated range (ending at @end) covered the upcoming
  * stale bit.  We don't need to know exactly where the range starts since
@@ -195,7 +166,7 @@
 
 		iommu_tlbiall(mapping->domain);
 		mapping->have_stale_tlbs = false;
-		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync);
+		av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, skip_sync);
 	}
 
 	iova =  (bit << FAST_PAGE_SHIFT) + mapping->base;
@@ -346,39 +317,21 @@
 		set_bit(PG_dcache_clean, &page->flags);
 }
 
-static int __fast_dma_direction_to_prot(enum dma_data_direction dir)
-{
-	switch (dir) {
-	case DMA_BIDIRECTIONAL:
-		return IOMMU_READ | IOMMU_WRITE;
-	case DMA_TO_DEVICE:
-		return IOMMU_READ;
-	case DMA_FROM_DEVICE:
-		return IOMMU_WRITE;
-	default:
-		return 0;
-	}
-}
-
 static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
 				   unsigned long offset, size_t size,
 				   enum dma_data_direction dir,
 				   unsigned long attrs)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	dma_addr_t iova;
 	unsigned long flags;
-	av8l_fast_iopte *pmd;
 	phys_addr_t phys_plus_off = page_to_phys(page) + offset;
 	phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE);
 	unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK;
 	size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE);
-	int nptes = len >> FAST_PAGE_SHIFT;
 	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
-	int prot = __fast_dma_direction_to_prot(dir);
 	bool is_coherent = is_dma_coherent(dev, attrs);
-
-	prot = __get_iommu_pgprot(attrs, prot, is_coherent);
+	int prot = dma_info_to_prot(dir, is_coherent, attrs);
 
 	if (!skip_sync && !is_coherent)
 		__fast_dma_page_cpu_to_dev(phys_to_page(phys_to_map),
@@ -391,13 +344,10 @@
 	if (unlikely(iova == DMA_ERROR_CODE))
 		goto fail;
 
-	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
-
-	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
+	if (unlikely(av8l_fast_map_public(mapping->pgtbl_ops, iova,
+					  phys_to_map, len, prot)))
 		goto fail_free_iova;
 
-	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
-
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
 	trace_map(mapping->domain, iova, phys_to_map, len, prot);
@@ -414,22 +364,25 @@
 			       size_t size, enum dma_data_direction dir,
 			       unsigned long attrs)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	unsigned long flags;
-	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
 	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
-	int nptes = len >> FAST_PAGE_SHIFT;
-	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
 	bool is_coherent = is_dma_coherent(dev, attrs);
 
-	if (!skip_sync && !is_coherent)
-		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!skip_sync && !is_coherent) {
+		phys_addr_t phys;
+
+		phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova);
+		WARN_ON(!phys);
+
+		__fast_dma_page_dev_to_cpu(phys_to_page(phys), offset,
+						size, dir);
+	}
 
 	spin_lock_irqsave(&mapping->lock, flags);
-	av8l_fast_unmap_public(pmd, len);
-	fast_dmac_clean_range(mapping, pmd, pmd + nptes);
+	av8l_fast_unmap_public(mapping->pgtbl_ops, iova, len);
 	__fast_smmu_free_iova(mapping, iova - offset, len);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 
@@ -439,53 +392,137 @@
 static void fast_smmu_sync_single_for_cpu(struct device *dev,
 		dma_addr_t iova, size_t size, enum dma_data_direction dir)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
-	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
-	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 
-	if (!__fast_is_pte_coherent(pmd))
-		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) {
+		phys_addr_t phys;
+
+		phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova);
+		WARN_ON(!phys);
+
+		__fast_dma_page_dev_to_cpu(phys_to_page(phys), offset,
+						size, dir);
+	}
 }
 
 static void fast_smmu_sync_single_for_device(struct device *dev,
 		dma_addr_t iova, size_t size, enum dma_data_direction dir)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
-	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	unsigned long offset = iova & ~FAST_PAGE_MASK;
-	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
 
-	if (!__fast_is_pte_coherent(pmd))
-		__fast_dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova)) {
+		phys_addr_t phys;
+
+		phys = av8l_fast_iova_to_phys_public(mapping->pgtbl_ops, iova);
+		WARN_ON(!phys);
+
+		__fast_dma_page_cpu_to_dev(phys_to_page(phys), offset,
+						size, dir);
+	}
+}
+
+static void fast_smmu_sync_sg_for_cpu(struct device *dev,
+				    struct scatterlist *sgl, int nelems,
+				    enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	dma_addr_t iova = sg_dma_address(sgl);
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	int i;
+
+	if (av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova))
+		return;
+
+	for_each_sg(sgl, sg, nelems, i)
+		__dma_unmap_area(sg_virt(sg), sg->length, dir);
+}
+
+static void fast_smmu_sync_sg_for_device(struct device *dev,
+				       struct scatterlist *sgl, int nelems,
+				       enum dma_data_direction dir)
+{
+	struct scatterlist *sg;
+	dma_addr_t iova = sg_dma_address(sgl);
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	int i;
+
+	if (av8l_fast_iova_coherent_public(mapping->pgtbl_ops, iova))
+		return;
+
+	for_each_sg(sgl, sg, nelems, i)
+		__dma_map_area(sg_virt(sg), sg->length, dir);
 }
 
 static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
 			    int nents, enum dma_data_direction dir,
 			    unsigned long attrs)
 {
-	/* 0 indicates error */
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
+	size_t iova_len;
+	bool is_coherent = is_dma_coherent(dev, attrs);
+	int prot = dma_info_to_prot(dir, is_coherent, attrs);
+	int ret;
+	dma_addr_t iova;
+	unsigned long flags;
+	size_t unused;
+
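+	/* All segments are packed into a single contiguous IOVA allocation. */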
+	iova_len = iommu_dma_prepare_map_sg(dev, mapping->iovad, sg, nents);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	iova = __fast_smmu_alloc_iova(mapping, attrs, iova_len);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	if (unlikely(iova == DMA_ERROR_CODE))
+		goto fail;
+
+	av8l_fast_map_sg_public(mapping->pgtbl_ops, iova, sg, nents, prot,
+				&unused);
+
+	ret = iommu_dma_finalise_sg(dev, sg, nents, iova);
+
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		fast_smmu_sync_sg_for_device(dev, sg, nents, dir);
+
+	return ret;
+fail:
+	iommu_dma_invalidate_sg(sg, nents);
 	return 0;
 }
 
 static void fast_smmu_unmap_sg(struct device *dev,
-			       struct scatterlist *sg, int nents,
+			       struct scatterlist *sg, int nelems,
 			       enum dma_data_direction dir,
 			       unsigned long attrs)
 {
-	WARN_ON_ONCE(1);
-}
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
+	unsigned long flags;
+	dma_addr_t start;
+	size_t len;
+	struct scatterlist *tmp;
+	int i;
 
-static void fast_smmu_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sg, int nents, enum dma_data_direction dir)
-{
-	WARN_ON_ONCE(1);
-}
+	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		fast_smmu_sync_sg_for_cpu(dev, sg, nelems, dir);
 
-static void fast_smmu_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sg, int nents, enum dma_data_direction dir)
-{
-	WARN_ON_ONCE(1);
+	/*
+	 * The scatterlist segments are mapped into a single
+	 * contiguous IOVA allocation, so this is incredibly easy.
+	 */
+	start = sg_dma_address(sg);
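+	/* A zero sg_dma_len() marks the end of the mapped segments. */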
+	for_each_sg(sg_next(sg), tmp, nelems - 1, i) {
+		if (sg_dma_len(tmp) == 0)
+			break;
+		sg = tmp;
+	}
+	len = sg_dma_address(sg) + sg_dma_len(sg) - start;
+
+	av8l_fast_unmap_public(mapping->pgtbl_ops, start, len);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	__fast_smmu_free_iova(mapping, start, len);
+	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
 static void __fast_smmu_free_pages(struct page **pages, int count)
@@ -528,21 +565,18 @@
 			     dma_addr_t *handle, gfp_t gfp,
 			     unsigned long attrs)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	struct sg_table sgt;
 	dma_addr_t dma_addr, iova_iter;
 	void *addr;
-	av8l_fast_iopte *ptep;
 	unsigned long flags;
 	struct sg_mapping_iter miter;
 	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
-	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
 	bool is_coherent = is_dma_coherent(dev, attrs);
+	int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
 	pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
 	struct page **pages;
 
-	prot = __get_iommu_pgprot(attrs, prot, is_coherent);
-
 	*handle = DMA_ERROR_CODE;
 
 	pages = __fast_smmu_alloc_pages(count, gfp);
@@ -580,17 +614,14 @@
 	sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
 		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
 	while (sg_miter_next(&miter)) {
-		int nptes = miter.length >> FAST_PAGE_SHIFT;
-
-		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter);
 		if (unlikely(av8l_fast_map_public(
-				     ptep, page_to_phys(miter.page),
+				     mapping->pgtbl_ops, iova_iter,
+				     page_to_phys(miter.page),
 				     miter.length, prot))) {
 			dev_err(dev, "no map public\n");
 			/* TODO: unwind previously successful mappings */
 			goto out_free_iova;
 		}
-		fast_dmac_clean_range(mapping, ptep, ptep + nptes);
 		iova_iter += miter.length;
 	}
 	sg_miter_stop(&miter);
@@ -610,9 +641,7 @@
 out_unmap:
 	/* need to take the lock again for page tables and iova */
 	spin_lock_irqsave(&mapping->lock, flags);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
-	av8l_fast_unmap_public(ptep, size);
-	fast_dmac_clean_range(mapping, ptep, ptep + count);
+	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_addr, size);
 out_free_iova:
 	__fast_smmu_free_iova(mapping, dma_addr, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
@@ -627,11 +656,10 @@
 			   void *vaddr, dma_addr_t dma_handle,
 			   unsigned long attrs)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	struct vm_struct *area;
 	struct page **pages;
 	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
-	av8l_fast_iopte *ptep;
 	unsigned long flags;
 
 	size = ALIGN(size, SZ_4K);
@@ -642,10 +670,8 @@
 
 	pages = area->pages;
 	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
-	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
 	spin_lock_irqsave(&mapping->lock, flags);
-	av8l_fast_unmap_public(ptep, size);
-	fast_dmac_clean_range(mapping, ptep, ptep + count);
+	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size);
 	__fast_smmu_free_iova(mapping, dma_handle, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
 	__fast_smmu_free_pages(pages, count);
@@ -699,7 +725,7 @@
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	size_t offset = phys_addr & ~FAST_PAGE_MASK;
 	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
 	dma_addr_t dma_addr;
@@ -713,7 +739,7 @@
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	prot = __fast_dma_direction_to_prot(dir);
+	prot = dma_info_to_prot(dir, false, attrs);
 	prot |= IOMMU_MMIO;
 
 	if (iommu_map(mapping->domain, dma_addr, phys_addr - offset,
@@ -731,7 +757,7 @@
 			size_t size, enum dma_data_direction dir,
 			unsigned long attrs)
 {
-	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
 	size_t offset = addr & ~FAST_PAGE_MASK;
 	size_t len = round_up(size + offset, FAST_PAGE_SIZE);
 	unsigned long flags;
@@ -751,16 +777,20 @@
 static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
 					  void *data)
 {
-	av8l_fast_iopte *ptep = data;
+	av8l_fast_iopte *pmds, *ptep = data;
 	dma_addr_t iova;
 	unsigned long bitmap_idx;
+	struct io_pgtable *tbl;
 
-	bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds);
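+	/* The pmds array now lives in the io-pgtable config, not on the mapping. */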
+	tbl  = container_of(fast->pgtbl_ops, struct io_pgtable, ops);
+	pmds = tbl->cfg.av8l_fast_cfg.pmds;
+
+	bitmap_idx = (unsigned long)(ptep - pmds);
 	iova = bitmap_idx << FAST_PAGE_SHIFT;
 	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
 	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
 	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
-		fast->pgtbl_pmds, bitmap_idx);
+		pmds, bitmap_idx);
 	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
 		       32, 8, fast->bitmap, fast->bitmap_size, false);
 }
@@ -835,7 +865,16 @@
 
 	spin_lock_init(&fast->lock);
 
+	fast->iovad = kzalloc(sizeof(*fast->iovad), GFP_KERNEL);
+	if (!fast->iovad)
+		goto err_free_bitmap;
+	init_iova_domain(fast->iovad, FAST_PAGE_SIZE,
+			base >> FAST_PAGE_SHIFT);
+
 	return fast;
+
+err_free_bitmap:
+	kvfree(fast->bitmap);
 err2:
 	kfree(fast);
 err:
@@ -886,16 +925,15 @@
 	spin_unlock_irqrestore(&mapping->lock, flags);
 }
 
-static int fast_smmu_errata_init(struct dma_iommu_mapping *mapping)
+static int fast_smmu_errata_init(struct dma_fast_smmu_mapping *fast)
 {
-	struct dma_fast_smmu_mapping *fast = mapping->fast;
 	int vmid = VMID_HLOS;
 	int min_iova_align = 0;
 
-	iommu_domain_get_attr(mapping->domain,
+	iommu_domain_get_attr(fast->domain,
 			DOMAIN_ATTR_QCOM_MMU500_ERRATA_MIN_IOVA_ALIGN,
 			&min_iova_align);
-	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
+	iommu_domain_get_attr(fast->domain, DOMAIN_ATTR_SECURE_VMID, &vmid);
 	if (vmid >= VMID_LAST || vmid < 0)
 		vmid = VMID_HLOS;
 
@@ -908,6 +946,25 @@
 	return 0;
 }
 
+void fast_smmu_put_dma_cookie(struct iommu_domain *domain)
+{
+	struct dma_fast_smmu_mapping *fast = domain->iova_cookie;
+
+	if (!fast)
+		return;
+
+	if (fast->iovad) {
+		put_iova_domain(fast->iovad);
+		kfree(fast->iovad);
+	}
+
+	kvfree(fast->bitmap);
+
+	kfree(fast);
+	domain->iova_cookie = NULL;
+}
+
 /**
  * fast_smmu_init_mapping
  * @dev: valid struct device pointer
@@ -924,62 +981,47 @@
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
 	u64 size = (u64)mapping->bits << PAGE_SHIFT;
+	struct dma_fast_smmu_mapping *fast;
+
+	if (domain->iova_cookie) {
+		fast = domain->iova_cookie;
+		goto finish;
+	}
 
 	if (mapping->base + size > (SZ_1G * 4ULL)) {
 		dev_err(dev, "Iova end address too large\n");
 		return -EINVAL;
 	}
 
-	mapping->fast = __fast_smmu_create_mapping_sized(mapping->base, size);
-	if (IS_ERR(mapping->fast))
+	fast = __fast_smmu_create_mapping_sized(mapping->base, size);
+	if (IS_ERR(fast))
 		return -ENOMEM;
-	mapping->fast->domain = domain;
-	mapping->fast->dev = dev;
 
-	if (fast_smmu_errata_init(mapping))
+	fast->domain = domain;
+	fast->dev = dev;
+	domain->iova_cookie = fast;
+
+	err = fast_smmu_errata_init(fast);
+	if (err)
 		goto release_mapping;
 
-	fast_smmu_reserve_pci_windows(dev, mapping->fast);
-
 	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
 				  &info)) {
 		dev_err(dev, "Couldn't get page table info\n");
 		err = -EINVAL;
 		goto release_mapping;
 	}
-	mapping->fast->pgtbl_pmds = info.pmds;
+	fast->pgtbl_ops = (struct io_pgtable_ops *)info.ops;
 
-	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
-				  &mapping->fast->is_smmu_pt_coherent)) {
-		err = -EINVAL;
-		goto release_mapping;
-	}
+	fast->notifier.notifier_call = fast_smmu_notify;
+	av8l_register_notify(&fast->notifier);
 
-	mapping->fast->notifier.notifier_call = fast_smmu_notify;
-	av8l_register_notify(&mapping->fast->notifier);
-
+finish:
+	fast_smmu_reserve_pci_windows(dev, fast);
 	mapping->ops = &fast_smmu_dma_ops;
 	return 0;
 
 release_mapping:
-	kfree(mapping->fast->bitmap);
-	kfree(mapping->fast);
+	fast_smmu_put_dma_cookie(domain);
 	return err;
 }
-
-/**
- * fast_smmu_release_mapping
- * @kref: dma_iommu_mapping->kref
- *
- * Cleans up the given iommu mapping.
- */
-void fast_smmu_release_mapping(struct kref *kref)
-{
-	struct dma_iommu_mapping *mapping =
-		container_of(kref, struct dma_iommu_mapping, kref);
-
-	kvfree(mapping->fast->bitmap);
-	kfree(mapping->fast);
-	iommu_domain_free(mapping->domain);
-	kfree(mapping);
-}
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 6dd9476..4338a6c 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -51,6 +51,7 @@
 #define AV8L_FAST_PTE_SH_NS		(((av8l_fast_iopte)0) << 8)
 #define AV8L_FAST_PTE_SH_OS		(((av8l_fast_iopte)2) << 8)
 #define AV8L_FAST_PTE_SH_IS		(((av8l_fast_iopte)3) << 8)
+#define AV8L_FAST_PTE_SH_MASK		(((av8l_fast_iopte)3) << 8)
 #define AV8L_FAST_PTE_NS		(((av8l_fast_iopte)1) << 5)
 #define AV8L_FAST_PTE_VALID		(((av8l_fast_iopte)1) << 0)
 
@@ -68,6 +69,7 @@
 #define AV8L_FAST_PTE_AP_PRIV_RO	(((av8l_fast_iopte)2) << 6)
 #define AV8L_FAST_PTE_AP_RO		(((av8l_fast_iopte)3) << 6)
 #define AV8L_FAST_PTE_ATTRINDX_SHIFT	2
+#define AV8L_FAST_PTE_ATTRINDX_MASK	0x7
 #define AV8L_FAST_PTE_nG		(((av8l_fast_iopte)1) << 11)
 
 /* Stage-2 PTE */
@@ -135,6 +137,13 @@
 
 #define AV8L_FAST_PAGE_SHIFT		12
 
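+/* Extract the memory attribute index and shareability field from a PTE. */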
+#define PTE_MAIR_IDX(pte)				\
+	(((pte) >> AV8L_FAST_PTE_ATTRINDX_SHIFT) &	\
+	 AV8L_FAST_PTE_ATTRINDX_MASK)
+
+#define PTE_SH_IDX(pte) ((pte) & AV8L_FAST_PTE_SH_MASK)
+
+#define iopte_pmd_offset(pmds, iova) ((pmds) + ((iova) >> AV8L_FAST_PAGE_SHIFT))
 
 #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB
 
@@ -163,10 +172,11 @@
 	}
 }
 
-void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync)
+void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, bool skip_sync)
 {
 	int i;
-	av8l_fast_iopte *pmdp = pmds;
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	av8l_fast_iopte *pmdp = data->pmds;
 
 	for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) {
 		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
@@ -183,11 +193,18 @@
 }
 #endif
 
-/* caller must take care of cache maintenance on *ptep */
-int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
-			 int prot)
+static void av8l_clean_range(struct io_pgtable_ops *ops,
+			av8l_fast_iopte *start, av8l_fast_iopte *end)
 {
-	int i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	struct io_pgtable *iop = iof_pgtable_ops_to_pgtable(ops);
+
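+	/* Cache maintenance is only needed when the table walker is not coherent. */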
+	if (!(iop->cfg.quirks & IO_PGTABLE_QUIRK_NO_DMA))
+		dmac_clean_range(start, end);
+}
+
+static av8l_fast_iopte
+av8l_fast_prot_to_pte(struct av8l_fast_io_pgtable *data, int prot)
+{
 	av8l_fast_iopte pte = AV8L_FAST_PTE_XN
 		| AV8L_FAST_PTE_TYPE_PAGE
 		| AV8L_FAST_PTE_AF
@@ -209,13 +226,7 @@
 	else
 		pte |= AV8L_FAST_PTE_AP_RW;
 
-	paddr &= AV8L_FAST_PTE_ADDR_MASK;
-	for (i = 0; i < nptes; i++, paddr += SZ_4K) {
-		__av8l_check_for_stale_tlb(ptep + i);
-		*(ptep + i) = pte | paddr;
-	}
-
-	return 0;
+	return pte;
 }
 
 static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
@@ -223,44 +234,81 @@
 {
 	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
 	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
-	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	unsigned long i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	av8l_fast_iopte pte;
 
-	av8l_fast_map_public(ptep, paddr, size, prot);
-	dmac_clean_range(ptep, ptep + nptes);
+	pte = av8l_fast_prot_to_pte(data, prot);
+	paddr &= AV8L_FAST_PTE_ADDR_MASK;
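+	/* Install one 4K leaf PTE per page, then clean the updated range. */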
+	for (i = 0; i < nptes; i++, paddr += SZ_4K) {
+		__av8l_check_for_stale_tlb(ptep + i);
+		*(ptep + i) = pte | paddr;
+	}
+	av8l_clean_range(ops, ptep, ptep + nptes);
 
 	return 0;
 }
 
-static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size,
-			      bool need_stale_tlb_tracking)
+int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova,
+			 phys_addr_t paddr, size_t size, int prot)
 {
-	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
-	int val = need_stale_tlb_tracking
+	return av8l_fast_map(ops, iova, paddr, size, prot);
+}
+
+static size_t
+__av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+			size_t size, bool allow_stale_tlb)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	unsigned long nptes;
+	av8l_fast_iopte *ptep;
+	int val = allow_stale_tlb
 		? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI
 		: 0;
 
+	ptep = iopte_pmd_offset(data->pmds, iova);
+	nptes = size >> AV8L_FAST_PAGE_SHIFT;
+
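+	/* Clear the PTEs, marking them when stale TLB entries are allowed. */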
 	memset(ptep, val, sizeof(*ptep) * nptes);
+	av8l_clean_range(ops, ptep, ptep + nptes);
+	if (!allow_stale_tlb)
+		io_pgtable_tlb_flush_all(&data->iop);
+
+	return size;
 }
 
-/* caller must take care of cache maintenance on *ptep */
-void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size)
+/* caller must take care of tlb cache maintenance */
+void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova,
+				size_t size)
 {
-	__av8l_fast_unmap(ptep, size, true);
+	__av8l_fast_unmap(ops, iova, size, true);
 }
 
 static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 			      size_t size)
 {
-	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
-	struct io_pgtable *iop = &data->iop;
-	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
-	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	return __av8l_fast_unmap(ops, iova, size, false);
+}
 
-	__av8l_fast_unmap(ptep, size, false);
-	dmac_clean_range(ptep, ptep + nptes);
-	io_pgtable_tlb_flush_all(iop);
+static int av8l_fast_map_sg(struct io_pgtable_ops *ops,
+			unsigned long iova, struct scatterlist *sgl,
+			unsigned int nents, int prot, size_t *size)
+{
+	struct scatterlist *sg;
+	int i;
 
-	return size;
+	for_each_sg(sgl, sg, nents, i) {
+		av8l_fast_map(ops, iova, sg_phys(sg), sg->length, prot);
+		iova += sg->length;
+	}
+
+	return nents;
+}
+
+int av8l_fast_map_sg_public(struct io_pgtable_ops *ops,
+			    unsigned long iova, struct scatterlist *sgl,
+			    unsigned int nents, int prot, size_t *size)
+{
+	return av8l_fast_map_sg(ops, iova, sgl, nents, prot, size);
 }
 
 #if defined(CONFIG_ARM64)
@@ -305,11 +353,27 @@
 	return phys | (iova & 0xfff);
 }
 
-static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
-			    struct scatterlist *sg, unsigned int nents,
-			    int prot, size_t *size)
+phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops,
+					  unsigned long iova)
 {
-	return -ENODEV;
+	return av8l_fast_iova_to_phys(ops, iova);
+}
+
+static bool av8l_fast_iova_coherent(struct io_pgtable_ops *ops,
+					unsigned long iova)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
+
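+	/* Coherent IOVAs map cacheable memory with inner or outer shareability. */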
+	return ((PTE_MAIR_IDX(*ptep) == AV8L_FAST_MAIR_ATTR_IDX_CACHE) &&
+		((PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_OS) ||
+		 (PTE_SH_IDX(*ptep) == AV8L_FAST_PTE_SH_IS)));
+}
+
+bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops,
+					unsigned long iova)
+{
+	return av8l_fast_iova_coherent(ops, iova);
 }
 
 static struct av8l_fast_io_pgtable *
@@ -326,6 +390,7 @@
 		.map_sg		= av8l_fast_map_sg,
 		.unmap		= av8l_fast_unmap,
 		.iova_to_phys	= av8l_fast_iova_to_phys,
+		.is_iova_coherent = av8l_fast_iova_coherent,
 	};
 
 	return data;
@@ -634,7 +699,7 @@
 	}
 
 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, false);
+	av8l_fast_clear_stale_ptes(ops, false);
 
 	/* map the entire 4GB VA space with 8K map calls */
 	for (iova = 0; iova < max; iova += SZ_8K) {
@@ -655,7 +720,7 @@
 	}
 
 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, false);
+	av8l_fast_clear_stale_ptes(ops, false);
 
 	/* map the entire 4GB VA space with 16K map calls */
 	for (iova = 0; iova < max; iova += SZ_16K) {
@@ -676,7 +741,7 @@
 	}
 
 	/* sweep up TLB proving PTEs */
-	av8l_fast_clear_stale_ptes(pmds, false);
+	av8l_fast_clear_stale_ptes(ops, false);
 
 	/* map the entire 4GB VA space with 64K map calls */
 	for (iova = 0; iova < max; iova += SZ_64K) {
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9781ca7..d5821dd 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1450,9 +1450,6 @@
 {
 	int ret;
 
-	if (group->default_domain && group->domain != group->default_domain)
-		return -EBUSY;
-
 	ret = __iommu_group_for_each_dev(group, domain,
 					 iommu_group_do_attach_device);
 	if (ret == 0)
@@ -1482,28 +1479,18 @@
 	return 0;
 }
 
+/*
+ * Although upstream implements detaching the default_domain as a no-op,
+ * the "SID switch" secure use case requires complete removal of SIDs/SMRs
+ * from the HLOS IOMMU registers.
+ */
 static void __iommu_detach_group(struct iommu_domain *domain,
 				 struct iommu_group *group)
 {
-	int ret;
-
-	if (!group->default_domain) {
-		__iommu_group_for_each_dev(group, domain,
+	__iommu_group_for_each_dev(group, domain,
 					   iommu_group_do_detach_device);
-		group->domain = NULL;
-		return;
-	}
-
-	if (group->domain == group->default_domain)
-		return;
-
-	/* Detach by re-attaching to the default domain */
-	ret = __iommu_group_for_each_dev(group, group->default_domain,
-					 iommu_group_do_attach_device);
-	if (ret != 0)
-		WARN_ON(1);
-	else
-		group->domain = group->default_domain;
+	group->domain = NULL;
 }
 
 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 3c7e348..a781ba6 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -54,7 +54,7 @@
 	if (ret)
 		goto unlock;
 
-	if (state == LED_OFF)
+	if (state == LED_OFF && !(led_cdev->flags & LED_KEEP_TRIGGER))
 		led_trigger_remove(led_cdev);
 	led_set_brightness(led_cdev, state);
 
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 17d73db..d839811 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -44,7 +44,8 @@
 		goto unlock;
 	}
 
-	if (sysfs_streq(buf, "none")) {
+	if (sysfs_streq(buf, "none") &&
+			!(led_cdev->flags & LED_KEEP_TRIGGER)) {
 		led_trigger_remove(led_cdev);
 		goto unlock;
 	}
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 333ed4a..f6fd115 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -69,7 +69,9 @@
 		return -ENOMEM;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(base))
-		return PTR_ERR(base);
+	if (!res)
+		return -ENODEV;
+	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (!base)
+		return -ENOMEM;
 
@@ -127,6 +129,9 @@
 	{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = (void *)16 },
 	{ .compatible = "qcom,msm8998-apcs-hmss-global", .data = (void *)8 },
 	{ .compatible = "qcom,sdm845-apss-shared", .data = (void *)12 },
+	{ .compatible = "qcom,sm8150-apcs-hmss-global", .data = (void *) 12 },
+	{ .compatible = "qcom,sm8150-spcs-global", .data = (void *)0 },
+	{ .compatible = "qcom,kona-spcs-global", .data = (void *)0 },
 	{}
 };
 MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
@@ -137,6 +142,7 @@
 	.driver = {
 		.name = "qcom_apcs_ipc",
 		.of_match_table = qcom_apcs_ipc_of_match,
+		.suppress_bind_attrs = true,
 	},
 };
 
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 54fe90a..e91d70a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -137,6 +137,7 @@
 source "drivers/media/platform/xilinx/Kconfig"
 source "drivers/media/platform/rcar-vin/Kconfig"
 source "drivers/media/platform/atmel/Kconfig"
+source "drivers/media/platform/msm/Kconfig"
 
 config VIDEO_TI_CAL
 	tristate "TI CAL (Camera Adaptation Layer) driver"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 41322ab..1e28b41 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -94,5 +94,5 @@
 obj-$(CONFIG_VIDEO_QCOM_VENUS)		+= qcom/venus/
 
 obj-y					+= meson/
-
+obj-y					+= msm/
 obj-y					+= cros-ec-cec/
diff --git a/drivers/media/platform/msm/Kconfig b/drivers/media/platform/msm/Kconfig
index 11c4e05..f7d6401 100644
--- a/drivers/media/platform/msm/Kconfig
+++ b/drivers/media/platform/msm/Kconfig
@@ -1,3 +1,12 @@
 # SPDX-License-Identifier: GPL-2.0
 
+menuconfig SPECTRA_CAMERA
+	bool "Qualcomm Technologies, Inc. Spectra camera and video capture support"
+	depends on ARCH_QCOM && VIDEO_V4L2 && I2C
+	help
+	  Say Y here to enable the video adapters for Qualcomm
+	  Technologies, Inc. Spectra camera and video capture.
+	  Enabling this adds support for the camera driver stack,
+	  including the sensor, IFE and postprocessing drivers.
+
 source "drivers/media/platform/msm/sde/Kconfig"
diff --git a/drivers/media/platform/msm/Makefile b/drivers/media/platform/msm/Makefile
index 71c6e53..95990f3 100644
--- a/drivers/media/platform/msm/Makefile
+++ b/drivers/media/platform/msm/Makefile
@@ -1,3 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-y += sde/
+obj-$(CONFIG_SPECTRA_CAMERA) += camera/
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/Makefile b/drivers/media/platform/msm/camera/Makefile
new file mode 100644
index 0000000..c3a7304
--- /dev/null
+++ b/drivers/media/platform/msm/camera/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_utils/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_core/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_module/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme/
diff --git a/drivers/media/platform/msm/camera/cam_cdm/Makefile b/drivers/media/platform/msm/camera/cam_cdm/Makefile
new file mode 100644
index 0000000..596f975e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cdm_soc.o cam_cdm_util.o cam_cdm_intf.o \
+				cam_cdm_core_common.o cam_cdm_virtual_core.o \
+				cam_cdm_hw_core.o
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
new file mode 100644
index 0000000..2eb8249
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm.h
@@ -0,0 +1,254 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_H_
+#define _CAM_CDM_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_soc_util.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_intf.h"
+#include "cam_hw.h"
+#include "cam_debug_util.h"
+
+#define CAM_MAX_SW_CDM_VERSION_SUPPORTED  1
+#define CAM_SW_CDM_INDEX                  0
+#define CAM_CDM_INFLIGHT_WORKS            5
+#define CAM_CDM_HW_RESET_TIMEOUT          300
+
+#define CAM_CDM_HW_ID_MASK      0xF
+#define CAM_CDM_HW_ID_SHIFT     0x5
+#define CAM_CDM_CLIENTS_ID_MASK 0x1F
+
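+/*
+ * A client handle encodes the CDM hw index in bits [8:5] and the client
+ * index in bits [4:0].
+ */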
+#define CAM_CDM_GET_HW_IDX(x) (((x) >> CAM_CDM_HW_ID_SHIFT) & \
+	CAM_CDM_HW_ID_MASK)
+#define CAM_CDM_CREATE_CLIENT_HANDLE(hw_idx, client_idx) \
+	((((hw_idx) & CAM_CDM_HW_ID_MASK) << CAM_CDM_HW_ID_SHIFT) | \
+	 ((client_idx) & CAM_CDM_CLIENTS_ID_MASK))
+#define CAM_CDM_GET_CLIENT_IDX(x) ((x) & CAM_CDM_CLIENTS_ID_MASK)
+#define CAM_PER_CDM_MAX_REGISTERED_CLIENTS (CAM_CDM_CLIENTS_ID_MASK + 1)
+#define CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM (CAM_CDM_HW_ID_MASK + 1)
+
+/* enum cam_cdm_reg_attr - read, write, read and write permissions.*/
+enum cam_cdm_reg_attr {
+	CAM_REG_ATTR_READ,
+	CAM_REG_ATTR_WRITE,
+	CAM_REG_ATTR_READ_WRITE,
+};
+
+/* enum cam_cdm_hw_process_intf_cmd - interface commands.*/
+enum cam_cdm_hw_process_intf_cmd {
+	CAM_CDM_HW_INTF_CMD_ACQUIRE,
+	CAM_CDM_HW_INTF_CMD_RELEASE,
+	CAM_CDM_HW_INTF_CMD_SUBMIT_BL,
+	CAM_CDM_HW_INTF_CMD_RESET_HW,
+	CAM_CDM_HW_INTF_CMD_INVALID,
+};
+
+/* enum cam_cdm_regs - CDM driver offset enums.*/
+enum cam_cdm_regs {
+	/*cfg_offsets 0*/
+	CDM_CFG_HW_VERSION,
+	CDM_CFG_TITAN_VERSION,
+	CDM_CFG_RST_CMD,
+	CDM_CFG_CGC_CFG,
+	CDM_CFG_CORE_CFG,
+	CDM_CFG_CORE_EN,
+	CDM_CFG_FE_CFG,
+	/*irq_offsets 7*/
+	CDM_IRQ_MASK,
+	CDM_IRQ_CLEAR,
+	CDM_IRQ_CLEAR_CMD,
+	CDM_IRQ_SET,
+	CDM_IRQ_SET_CMD,
+	CDM_IRQ_STATUS,
+	CDM_IRQ_USR_DATA,
+	/*BL FIFO Registers 14*/
+	CDM_BL_FIFO_BASE_REG,
+	CDM_BL_FIFO_LEN_REG,
+	CDM_BL_FIFO_STORE_REG,
+	CDM_BL_FIFO_CFG,
+	CDM_BL_FIFO_RB,
+	CDM_BL_FIFO_BASE_RB,
+	CDM_BL_FIFO_LEN_RB,
+	CDM_BL_FIFO_PENDING_REQ_RB,
+	/*CDM System Debug Registers 22*/
+	CDM_DBG_WAIT_STATUS,
+	CDM_DBG_SCRATCH_0_REG,
+	CDM_DBG_SCRATCH_1_REG,
+	CDM_DBG_SCRATCH_2_REG,
+	CDM_DBG_SCRATCH_3_REG,
+	CDM_DBG_SCRATCH_4_REG,
+	CDM_DBG_SCRATCH_5_REG,
+	CDM_DBG_SCRATCH_6_REG,
+	CDM_DBG_SCRATCH_7_REG,
+	CDM_DBG_LAST_AHB_ADDR,
+	CDM_DBG_LAST_AHB_DATA,
+	CDM_DBG_CORE_DBUG,
+	CDM_DBG_LAST_AHB_ERR_ADDR,
+	CDM_DBG_LAST_AHB_ERR_DATA,
+	CDM_DBG_CURRENT_BL_BASE,
+	CDM_DBG_CURRENT_BL_LEN,
+	CDM_DBG_CURRENT_USED_AHB_BASE,
+	CDM_DBG_DEBUG_STATUS,
+	/*FE Bus Miser Registers 40*/
+	CDM_BUS_MISR_CFG_0,
+	CDM_BUS_MISR_CFG_1,
+	CDM_BUS_MISR_RD_VAL,
+	/*Performance Counter registers 43*/
+	CDM_PERF_MON_CTRL,
+	CDM_PERF_MON_0,
+	CDM_PERF_MON_1,
+	CDM_PERF_MON_2,
+	/*Spare registers 47*/
+	CDM_SPARE,
+};
+
+/* struct cam_cdm_reg_offset - struct for offset with attribute.*/
+struct cam_cdm_reg_offset {
+	uint32_t offset;
+	enum cam_cdm_reg_attr attribute;
+};
+
+/* struct cam_cdm_reg_offset_table - struct for whole offset table.*/
+struct cam_cdm_reg_offset_table {
+	uint32_t first_offset;
+	uint32_t last_offset;
+	uint32_t reg_count;
+	const struct cam_cdm_reg_offset *offsets;
+	uint32_t offset_max_size;
+};
+
+/* enum cam_cdm_flags - Bit fields for CDM flags used */
+enum cam_cdm_flags {
+	CAM_CDM_FLAG_SHARED_CDM,
+	CAM_CDM_FLAG_PRIVATE_CDM,
+};
+
+/* enum cam_cdm_type - Enum for possible CAM CDM types */
+enum cam_cdm_type {
+	CAM_VIRTUAL_CDM,
+	CAM_HW_CDM,
+};
+
+/* enum cam_cdm_mem_base_index - Enum for possible CAM CDM types */
+enum cam_cdm_mem_base_index {
+	CAM_HW_CDM_BASE_INDEX,
+	CAM_HW_CDM_MAX_INDEX = CAM_SOC_MAX_BLOCK,
+};
+
+/* struct cam_cdm_client - struct for cdm clients data.*/
+struct cam_cdm_client {
+	struct cam_cdm_acquire_data data;
+	void __iomem  *changebase_addr;
+	uint32_t stream_on;
+	uint32_t refcount;
+	struct mutex lock;
+	uint32_t handle;
+};
+
+/* struct cam_cdm_work_payload - struct for cdm work payload data.*/
+struct cam_cdm_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
+};
+
+/* enum cam_cdm_bl_cb_type - Enum for possible CAM CDM cb request types */
+enum cam_cdm_bl_cb_type {
+	CAM_HW_CDM_BL_CB_CLIENT = 1,
+	CAM_HW_CDM_BL_CB_INTERNAL,
+};
+
+/* struct cam_cdm_bl_cb_request_entry - callback entry for work to process.*/
+struct cam_cdm_bl_cb_request_entry {
+	uint8_t bl_tag;
+	enum cam_cdm_bl_cb_type request_type;
+	uint32_t client_hdl;
+	void *userdata;
+	uint32_t cookie;
+	struct list_head entry;
+};
+
+/* struct cam_cdm_hw_intf_cmd_submit_bl - cdm interface submit command.*/
+struct cam_cdm_hw_intf_cmd_submit_bl {
+	uint32_t handle;
+	struct cam_cdm_bl_request *data;
+};
+
+/* struct cam_cdm_hw_mem - CDM hw memory struct */
+struct cam_cdm_hw_mem {
+	int32_t handle;
+	uint32_t vaddr;
+	uintptr_t kmdvaddr;
+	size_t size;
+};
+
+/* struct cam_cdm - CDM hw device struct */
+struct cam_cdm {
+	uint32_t index;
+	char name[128];
+	enum cam_cdm_id id;
+	enum cam_cdm_flags flags;
+	struct completion reset_complete;
+	struct completion bl_complete;
+	struct workqueue_struct *work_queue;
+	struct list_head bl_request_list;
+	struct cam_hw_version version;
+	uint32_t hw_version;
+	uint32_t hw_family_version;
+	struct cam_iommu_handle iommu_hdl;
+	struct cam_cdm_reg_offset_table *offset_tbl;
+	struct cam_cdm_utils_ops *ops;
+	struct cam_cdm_client *clients[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+	uint8_t bl_tag;
+	atomic_t error;
+	atomic_t bl_done;
+	struct cam_cdm_hw_mem gen_irq;
+	uint32_t cpas_handle;
+};
+
+/* struct cam_cdm_private_dt_data - CDM hw custom dt data */
+struct cam_cdm_private_dt_data {
+	bool dt_cdm_shared;
+	uint32_t dt_num_supported_clients;
+	const char *dt_cdm_client_name[CAM_PER_CDM_MAX_REGISTERED_CLIENTS];
+};
+
+/* struct cam_cdm_intf_devices - CDM mgr interface devices */
+struct cam_cdm_intf_devices {
+	struct mutex lock;
+	uint32_t refcount;
+	struct cam_hw_intf *device;
+	struct cam_cdm_private_dt_data *data;
+};
+
+/* struct cam_cdm_intf_mgr - CDM mgr interface device struct */
+struct cam_cdm_intf_mgr {
+	bool probe_done;
+	struct cam_cdm_intf_devices nodes[CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM];
+	uint32_t cdm_count;
+	uint32_t dt_supported_hw_cdm;
+	int32_t refcount;
+};
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index);
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index);
+
+#endif /* _CAM_CDM_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
new file mode 100644
index 0000000..89abdb1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_io_util.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_soc.h"
+#include "cam_cdm_core_common.h"
+
+static void cam_cdm_get_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CAM_DBG(CAM_CDM, "CDM client get refcount=%d",
+		client->refcount);
+	client->refcount++;
+	mutex_unlock(&client->lock);
+}
+
+static void cam_cdm_put_client_refcount(struct cam_cdm_client *client)
+{
+	mutex_lock(&client->lock);
+	CAM_DBG(CAM_CDM, "CDM client put refcount=%d",
+		client->refcount);
+	if (client->refcount > 0) {
+		client->refcount--;
+	} else {
+		CAM_ERR(CAM_CDM, "Refcount put when zero");
+		WARN_ON(1);
+	}
+	mutex_unlock(&client->lock);
+}
+
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version)
+{
+	switch (ver) {
+	case CAM_CDM170_VERSION:
+	case CAM_CDM175_VERSION:
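+		/* Fields keep the raw masked register bits (not right-shifted). */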
+		cam_version->major    = (ver & 0xF0000000);
+		cam_version->minor    = (ver & 0xFFF0000);
+		cam_version->incr     = (ver & 0xFFFF);
+		cam_version->reserved = 0;
+		return true;
+	default:
+		CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util", ver);
+		break;
+	}
+	return false;
+}
+
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	if (!irq_data)
+		return false;
+
+	CAM_DBG(CAM_CDM, "CPAS error callback type=%d", irq_data->irq_type);
+
+	return false;
+}
+
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version)
+{
+	if (by_cam_version == false) {
+		switch (ver) {
+		case CAM_CDM170_VERSION:
+		case CAM_CDM175_VERSION:
+			return &CDM170_ops;
+		default:
+			CAM_ERR(CAM_CDM, "CDM Version=%x not supported in util",
+				ver);
+		}
+	} else if (cam_version) {
+		if (((cam_version->major == 1) &&
+			(cam_version->minor == 0) &&
+			(cam_version->incr == 0)) ||
+			((cam_version->major == 1) &&
+			(cam_version->minor == 1) &&
+			(cam_version->incr == 0))) {
+
+			CAM_DBG(CAM_CDM,
+				"cam_hw_version=%x:%x:%x supported",
+				cam_version->major, cam_version->minor,
+				cam_version->incr);
+			return &CDM170_ops;
+		}
+
+		CAM_ERR(CAM_CDM, "cam_hw_version=%x:%x:%x not supported",
+			cam_version->major, cam_version->minor,
+			cam_version->incr);
+	}
+
+	return NULL;
+}
+
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+
+	list_for_each_entry(node, bl_list, entry) {
+		if (node->bl_tag == tag)
+			return node;
+	}
+	CAM_ERR(CAM_CDM, "Could not find the bl request for tag=%x", tag);
+
+	return NULL;
+}
+
+int cam_cdm_get_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core;
+
+	if ((cdm_hw) && (cdm_hw->core_info) && (get_hw_cap_args) &&
+		(sizeof(struct cam_iommu_handle) == arg_size)) {
+		cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+		*((struct cam_iommu_handle *)get_hw_cap_args) =
+			cdm_core->iommu_hdl;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int cam_cdm_find_free_client_slot(struct cam_cdm *hw)
+{
+	int i;
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (hw->clients[i] == NULL) {
+			CAM_DBG(CAM_CDM, "Found client slot %d", i);
+			return i;
+		}
+	}
+	CAM_ERR(CAM_CDM, "No more client slots");
+
+	return -EBUSY;
+}
+
+
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data)
+{
+	int i;
+	struct cam_cdm *core = NULL;
+	struct cam_cdm_client *client = NULL;
+
+	if (!cdm_hw) {
+		CAM_ERR(CAM_CDM, "CDM Notify called with NULL hw info");
+		return;
+	}
+	core = (struct cam_cdm *)cdm_hw->core_info;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		int client_idx;
+		struct cam_cdm_bl_cb_request_entry *node =
+			(struct cam_cdm_bl_cb_request_entry *)data;
+
+		client_idx = CAM_CDM_GET_CLIENT_IDX(node->client_hdl);
+		client = core->clients[client_idx];
+		if ((!client) || (client->handle != node->client_hdl)) {
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
+				node->client_hdl);
+			return;
+		}
+		cam_cdm_get_client_refcount(client);
+		if (client->data.cam_cdm_callback) {
+			CAM_DBG(CAM_CDM, "Calling client=%s cb cookie=%d",
+				client->data.identifier, node->cookie);
+			client->data.cam_cdm_callback(node->client_hdl,
+				node->userdata, CAM_CDM_CB_STATUS_BL_SUCCESS,
+				node->cookie);
+			CAM_DBG(CAM_CDM, "Exit client cb cookie=%d",
+				node->cookie);
+		} else {
+			CAM_ERR(CAM_CDM, "No cb registered for client hdl=%x",
+				node->client_hdl);
+		}
+		cam_cdm_put_client_refcount(client);
+		return;
+	}
+
+	for (i = 0; i < CAM_PER_CDM_MAX_REGISTERED_CLIENTS; i++) {
+		if (core->clients[i] != NULL) {
+			client = core->clients[i];
+			mutex_lock(&client->lock);
+			CAM_DBG(CAM_CDM, "Found client slot %d", i);
+			if (client->data.cam_cdm_callback) {
+				if (status == CAM_CDM_CB_STATUS_PAGEFAULT) {
+					unsigned long iova =
+						(unsigned long)data;
+
+					client->data.cam_cdm_callback(
+						client->handle,
+						client->data.userdata,
+						CAM_CDM_CB_STATUS_PAGEFAULT,
+						(iova & 0xFFFFFFFF));
+				}
+			} else {
+				CAM_ERR(CAM_CDM,
+					"No cb registered for client hdl=%x",
+					client->handle);
+			}
+			mutex_unlock(&client->lock);
+		}
+	}
+}
+
+int cam_cdm_stream_ops_internal(void *hw_priv,
+	void *start_args, bool operation)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *core = NULL;
+	int rc = -EPERM;
+	int client_idx;
+	struct cam_cdm_client *client;
+	uint32_t *handle = start_args;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	client_idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+	client = core->clients[client_idx];
+	if (!client) {
+		CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client, *handle);
+		return -EINVAL;
+	}
+	cam_cdm_get_client_refcount(client);
+	if (*handle != client->handle) {
+		CAM_ERR(CAM_CDM, "client id given handle=%x invalid", *handle);
+		cam_cdm_put_client_refcount(client);
+		return -EINVAL;
+	}
+	if (operation == true) {
+		if (true == client->stream_on) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed ON");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	} else {
+		if (client->stream_on == false) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM client is already streamed Off");
+			cam_cdm_put_client_refcount(client);
+			return rc;
+		}
+	}
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	if (operation == true) {
+		if (!cdm_hw->open_count) {
+			struct cam_ahb_vote ahb_vote;
+			struct cam_axi_vote axi_vote;
+
+			ahb_vote.type = CAM_VOTE_ABSOLUTE;
+			ahb_vote.vote.level = CAM_SVS_VOTE;
+			axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+			axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+			rc = cam_cpas_start(core->cpas_handle,
+				&ahb_vote, &axi_vote);
+			if (rc != 0) {
+				CAM_ERR(CAM_CDM, "CPAS start failed");
+				goto end;
+			}
+			CAM_DBG(CAM_CDM, "CDM init first time");
+			if (core->id == CAM_CDM_VIRTUAL) {
+				CAM_DBG(CAM_CDM,
+					"Virtual CDM HW init first time");
+				rc = 0;
+			} else {
+				CAM_DBG(CAM_CDM, "CDM HW init first time");
+				rc = cam_hw_cdm_init(hw_priv, NULL, 0);
+				if (rc == 0) {
+					rc = cam_hw_cdm_alloc_genirq_mem(
+						hw_priv);
+					if (rc != 0) {
+						CAM_ERR(CAM_CDM,
+							"Genirqalloc failed");
+						cam_hw_cdm_deinit(hw_priv,
+							NULL, 0);
+					}
+				} else {
+					CAM_ERR(CAM_CDM, "CDM HW init failed");
+				}
+			}
+			if (rc == 0) {
+				cdm_hw->open_count++;
+				client->stream_on = true;
+			} else {
+				if (cam_cpas_stop(core->cpas_handle))
+					CAM_ERR(CAM_CDM, "CPAS stop failed");
+			}
+		} else {
+			cdm_hw->open_count++;
+			CAM_DBG(CAM_CDM, "CDM HW already ON count=%d",
+				cdm_hw->open_count);
+			rc = 0;
+			client->stream_on = true;
+		}
+	} else {
+		if (cdm_hw->open_count) {
+			cdm_hw->open_count--;
+			CAM_DBG(CAM_CDM, "stream OFF CDM %d",
+				cdm_hw->open_count);
+			if (!cdm_hw->open_count) {
+				CAM_DBG(CAM_CDM, "CDM Deinit now");
+				if (core->id == CAM_CDM_VIRTUAL) {
+					CAM_DBG(CAM_CDM,
+						"Virtual CDM HW Deinit");
+					rc = 0;
+				} else {
+					CAM_DBG(CAM_CDM, "CDM HW Deinit now");
+					rc = cam_hw_cdm_deinit(
+						hw_priv, NULL, 0);
+					if (cam_hw_cdm_release_genirq_mem(
+						hw_priv))
+						CAM_ERR(CAM_CDM,
+							"Genirq release fail");
+				}
+				if (rc) {
+					CAM_ERR(CAM_CDM,
+						"Deinit failed in streamoff");
+				} else {
+					client->stream_on = false;
+					rc = cam_cpas_stop(core->cpas_handle);
+					if (rc)
+						CAM_ERR(CAM_CDM,
+							"CPAS stop failed");
+				}
+			} else {
+				client->stream_on = false;
+				rc = 0;
+				CAM_DBG(CAM_CDM,
+					"Client stream off success =%d",
+					cdm_hw->open_count);
+			}
+		} else {
+			CAM_DBG(CAM_CDM, "stream OFF CDM Invalid %d",
+				cdm_hw->open_count);
+			rc = -ENXIO;
+		}
+	}
+end:
+	cam_cdm_put_client_refcount(client);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+}
+
+int cam_cdm_stream_start(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, true);
+	return rc;
+
+}
+
+int cam_cdm_stream_stop(void *hw_priv,
+	void *start_args, uint32_t size)
+{
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	rc = cam_cdm_stream_ops_internal(hw_priv, start_args, false);
+	return rc;
+
+}
+
+int cam_cdm_process_cmd(void *hw_priv,
+	uint32_t cmd, void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_data = NULL;
+	struct cam_cdm *core = NULL;
+	int rc = -EINVAL;
+
+	if ((!hw_priv) || (!cmd_args) ||
+		(cmd >= CAM_CDM_HW_INTF_CMD_INVALID))
+		return rc;
+
+	soc_data = &cdm_hw->soc_info;
+	core = (struct cam_cdm *)cdm_hw->core_info;
+	switch (cmd) {
+	case CAM_CDM_HW_INTF_CMD_SUBMIT_BL: {
+		struct cam_cdm_hw_intf_cmd_submit_bl *req;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_hw_intf_cmd_submit_bl) != arg_size) {
+			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
+				arg_size);
+			break;
+		}
+		req = (struct cam_cdm_hw_intf_cmd_submit_bl *)cmd_args;
+		if ((req->data->type < 0) ||
+			(req->data->type > CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA)) {
+			CAM_ERR(CAM_CDM, "Invalid req bl cmd addr type=%d",
+				req->data->type);
+			break;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(req->handle);
+		client = core->clients[idx];
+		if ((!client) || (req->handle != client->handle)) {
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x", client,
+				req->handle);
+			break;
+		}
+		cam_cdm_get_client_refcount(client);
+		if ((req->data->flag == true) &&
+			(!client->data.cam_cdm_callback)) {
+			CAM_ERR(CAM_CDM,
+				"CDM request cb without registering cb");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (client->stream_on != true) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM needs to be streamed ON first");
+			cam_cdm_put_client_refcount(client);
+			break;
+		}
+		if (core->id == CAM_CDM_VIRTUAL)
+			rc = cam_virtual_cdm_submit_bl(cdm_hw, req, client);
+		else
+			rc = cam_hw_cdm_submit_bl(cdm_hw, req, client);
+
+		cam_cdm_put_client_refcount(client);
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_ACQUIRE: {
+		struct cam_cdm_acquire_data *data;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(struct cam_cdm_acquire_data) != arg_size) {
+			CAM_ERR(CAM_CDM, "Invalid CDM cmd %d arg size=%x", cmd,
+				arg_size);
+			break;
+		}
+
+		mutex_lock(&cdm_hw->hw_mutex);
+		data = (struct cam_cdm_acquire_data *)cmd_args;
+		CAM_DBG(CAM_CDM, "Trying to acquire client=%s in hw idx=%d",
+			data->identifier, core->index);
+		idx = cam_cdm_find_free_client_slot(core);
+		if ((idx < 0) || (core->clients[idx])) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			CAM_ERR(CAM_CDM,
+				"Fail to client slots, client=%s in hw idx=%d",
+			data->identifier, core->index);
+			break;
+		}
+		core->clients[idx] = kzalloc(sizeof(struct cam_cdm_client),
+			GFP_KERNEL);
+		if (!core->clients[idx]) {
+			mutex_unlock(&cdm_hw->hw_mutex);
+			rc = -ENOMEM;
+			break;
+		}
+
+		mutex_unlock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		mutex_init(&client->lock);
+		data->ops = core->ops;
+		if (core->id == CAM_CDM_VIRTUAL) {
+			data->cdm_version.major = 1;
+			data->cdm_version.minor = 0;
+			data->cdm_version.incr = 0;
+			data->cdm_version.reserved = 0;
+			data->ops = cam_cdm_get_ops(0,
+					&data->cdm_version, true);
+			if (!data->ops) {
+				mutex_destroy(&client->lock);
+				mutex_lock(&cdm_hw->hw_mutex);
+				kfree(core->clients[idx]);
+				core->clients[idx] = NULL;
+				mutex_unlock(
+					&cdm_hw->hw_mutex);
+				rc = -EPERM;
+				CAM_ERR(CAM_CDM, "Invalid ops for virtual cdm");
+				break;
+			}
+		} else {
+			data->cdm_version = core->version;
+		}
+
+		cam_cdm_get_client_refcount(client);
+		mutex_lock(&client->lock);
+		memcpy(&client->data, data,
+			sizeof(struct cam_cdm_acquire_data));
+		client->handle = CAM_CDM_CREATE_CLIENT_HANDLE(
+					core->index,
+					idx);
+		client->stream_on = false;
+		data->handle = client->handle;
+		CAM_DBG(CAM_CDM, "Acquired client=%s in hwidx=%d",
+			data->identifier, core->index);
+		mutex_unlock(&client->lock);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RELEASE: {
+		uint32_t *handle = cmd_args;
+		int idx;
+		struct cam_cdm_client *client;
+
+		if (sizeof(uint32_t) != arg_size) {
+			CAM_ERR(CAM_CDM,
+				"Invalid CDM cmd %d size=%x for handle=%x",
+				cmd, arg_size, *handle);
+			return -EINVAL;
+		}
+		idx = CAM_CDM_GET_CLIENT_IDX(*handle);
+		mutex_lock(&cdm_hw->hw_mutex);
+		client = core->clients[idx];
+		if ((!client) || (*handle != client->handle)) {
+			CAM_ERR(CAM_CDM, "Invalid client %pK hdl=%x",
+				client, *handle);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		cam_cdm_put_client_refcount(client);
+		mutex_lock(&client->lock);
+		if (client->refcount != 0) {
+			CAM_ERR(CAM_CDM, "CDM Client refcount not zero %d",
+				client->refcount);
+			rc = -EPERM;
+			mutex_unlock(&client->lock);
+			mutex_unlock(&cdm_hw->hw_mutex);
+			break;
+		}
+		core->clients[idx] = NULL;
+		mutex_unlock(&client->lock);
+		mutex_destroy(&client->lock);
+		kfree(client);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		rc = 0;
+		break;
+	}
+	case CAM_CDM_HW_INTF_CMD_RESET_HW: {
+		CAM_ERR(CAM_CDM, "CDM HW reset not supported for handle =%x",
+			*((uint32_t *)cmd_args));
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CDM, "CDM HW intf command not valid =%d", cmd);
+		break;
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
new file mode 100644
index 0000000..1a9f1d4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_core_common.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_CORE_COMMON_H_
+#define _CAM_CDM_CORE_COMMON_H_
+
+#include "cam_mem_mgr.h"
+
+#define CAM_CDM170_VERSION 0x10000000
+#define CAM_CDM175_VERSION 0x10010000
+
+extern struct cam_cdm_utils_ops CDM170_ops;
+
+int cam_hw_cdm_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_deinit(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv);
+int cam_hw_cdm_release_genirq_mem(void *hw_priv);
+int cam_cdm_get_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size);
+int cam_cdm_stream_ops_internal(void *hw_priv, void *start_args,
+	bool operation);
+int cam_cdm_stream_start(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_stream_stop(void *hw_priv, void *start_args, uint32_t size);
+int cam_cdm_process_cmd(void *hw_priv, uint32_t cmd, void *cmd_args,
+	uint32_t arg_size);
+bool cam_cdm_set_cam_hw_version(
+	uint32_t ver, struct cam_hw_version *cam_version);
+bool cam_cdm_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data);
+struct cam_cdm_utils_ops *cam_cdm_get_ops(
+	uint32_t ver, struct cam_hw_version *cam_version, bool by_cam_version);
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client);
+struct cam_cdm_bl_cb_request_entry *cam_cdm_find_request_by_bl_tag(
+	uint32_t tag, struct list_head *bl_list);
+void cam_cdm_notify_clients(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_cb_status status, void *data);
+
+#endif /* _CAM_CDM_CORE_COMMON_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
new file mode 100644
index 0000000..62587fe
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_hw_core.c
@@ -0,0 +1,1139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <media/cam_req_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw_cdm170_reg.h"
+
+#define CAM_HW_CDM_CPAS_0_NAME "qcom,cam170-cpas-cdm0"
+#define CAM_HW_CDM_IPE_0_NAME "qcom,cam170-ipe0-cdm"
+#define CAM_HW_CDM_IPE_1_NAME "qcom,cam170-ipe1-cdm"
+#define CAM_HW_CDM_BPS_NAME "qcom,cam170-bps-cdm"
+
+#define CAM_CDM_BL_FIFO_WAIT_TIMEOUT 2000
+
+static void cam_hw_cdm_work(struct work_struct *work);
+
+/* DT match table entry for all CDM variants*/
+static const struct of_device_id msm_cam_hw_cdm_dt_match[] = {
+	{
+		.compatible = CAM_HW_CDM_CPAS_0_NAME,
+		.data = &cam170_cpas_cdm_offset_table,
+	},
+	{}
+};
+
+static enum cam_cdm_id cam_hw_cdm_get_id_by_name(char *name)
+{
+	if (!strcmp(CAM_HW_CDM_CPAS_0_NAME, name))
+		return CAM_CDM_CPAS_0;
+
+	return CAM_CDM_MAX;
+}
+
+int cam_hw_cdm_bl_fifo_pending_bl_rb(struct cam_hw_info *cdm_hw,
+	uint32_t *pending_bl)
+{
+	int rc = 0;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+		pending_bl)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+static int cam_hw_cdm_enable_bl_done_irq(struct cam_hw_info *cdm_hw,
+	bool enable)
+{
+	int rc = -EIO;
+	uint32_t irq_mask = 0;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_MASK,
+		&irq_mask)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM IRQ mask");
+		return rc;
+	}
+
+	if (enable == true) {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+			(irq_mask | 0x4))) {
+			CAM_ERR(CAM_CDM, "Write failed to enable BL done irq");
+		} else {
+			atomic_inc(&core->bl_done);
+			rc = 0;
+			CAM_DBG(CAM_CDM, "BL done irq enabled =%d",
+				atomic_read(&core->bl_done));
+		}
+	} else {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK,
+			(irq_mask & 0x70003))) {
+			CAM_ERR(CAM_CDM, "Write failed to disable BL done irq");
+		} else {
+			atomic_dec(&core->bl_done);
+			rc = 0;
+			CAM_DBG(CAM_CDM, "BL done irq disable =%d",
+				atomic_read(&core->bl_done));
+		}
+	}
+	return rc;
+}
+
+static int cam_hw_cdm_enable_core(struct cam_hw_info *cdm_hw, bool enable)
+{
+	int rc = 0;
+
+	if (enable == true) {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x01)) {
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW core enable");
+			rc = -EIO;
+		}
+	} else {
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_CORE_EN, 0x02)) {
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW core disable");
+			rc = -EIO;
+		}
+	}
+	return rc;
+}
+
+int cam_hw_cdm_enable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0x10100)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_disable_core_dbg(struct cam_hw_info *cdm_hw)
+{
+	int rc = 0;
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, 0)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW core debug");
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+void cam_hw_cdm_dump_scratch_registors(struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg = 0;
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump core en=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_0_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch0=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_1_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch1=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_2_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch2=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_3_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch3=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_4_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch4=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_5_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch5=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_6_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch6=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_SCRATCH_7_REG, &dump_reg);
+	CAM_ERR(CAM_CDM, "dump scratch7=%x", dump_reg);
+}
+
+void cam_hw_cdm_dump_core_debug_registers(
+	struct cam_hw_info *cdm_hw)
+{
+	uint32_t dump_reg, core_dbg, loop_cnt;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_EN, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core status=%x", dump_reg);
+	/* First pause CDM, If it fails still proceed to dump debug info */
+	cam_hw_cdm_enable_core(cdm_hw, false);
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
+	loop_cnt = dump_reg;
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_DEBUG_STATUS, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW Debug status reg=%x", dump_reg);
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CORE_DBUG, &core_dbg);
+	if (core_dbg & 0x100) {
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_ADDR, &dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastaddr=%x", dump_reg);
+		cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_LAST_AHB_DATA, &dump_reg);
+		CAM_ERR(CAM_CDM, "AHB dump reglastdata=%x", dump_reg);
+	} else {
+		CAM_ERR(CAM_CDM, "CDM HW AHB dump not enable");
+	}
+
+	if (core_dbg & 0x10000) {
+		int i;
+
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO dump with loop count=%d",
+			loop_cnt);
+		for (i = 0 ; i < loop_cnt ; i++) {
+			cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_RB, i);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_RB,
+				&dump_reg);
+			CAM_ERR(CAM_CDM, "BL(%d) base addr =%x", i, dump_reg);
+			cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_RB,
+				&dump_reg);
+			CAM_ERR(CAM_CDM, "BL(%d) len=%d tag=%d", i,
+				(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+		}
+	} else {
+		CAM_ERR(CAM_CDM, "CDM HW BL FIFO readback not enable");
+	}
+
+	CAM_ERR(CAM_CDM, "CDM HW default dump");
+	cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_CORE_CFG, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW core cfg=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq status=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_SET, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW irq set reg=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_BASE, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current BL base=%x", dump_reg);
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_BL_LEN, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current BL len=%d tag=%d",
+		(dump_reg & 0xFFFFF), (dump_reg & 0xFF000000));
+
+	cam_cdm_read_hw_reg(cdm_hw, CDM_DBG_CURRENT_USED_AHB_BASE, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current AHB base=%x", dump_reg);
+
+	cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &dump_reg);
+	CAM_ERR(CAM_CDM, "CDM HW current pending BL=%x", dump_reg);
+
+	/* Enable CDM back */
+	cam_hw_cdm_enable_core(cdm_hw, true);
+	mutex_unlock(&cdm_hw->hw_mutex);
+}
+
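+/*
+ * Waits for room in the BL FIFO. Returns a negative errno on failure;
+ * a non-negative return is the write budget cam_hw_cdm_submit_bl() may
+ * consume before calling in here again (0 means re-check on the next
+ * BL write).
+ */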
+int cam_hw_cdm_wait_for_bl_fifo(struct cam_hw_info *cdm_hw,
+	uint32_t bl_count)
+{
+	uint32_t pending_bl = 0;
+	int32_t available_bl_slots = 0;
+	int rc = -EIO;
+	long time_left;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	do {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_BL_FIFO_PENDING_REQ_RB,
+			&pending_bl)) {
+			CAM_ERR(CAM_CDM, "Failed to read CDM pending BL's");
+			rc = -EIO;
+			break;
+		}
+		available_bl_slots = CAM_CDM_HWFIFO_SIZE - pending_bl;
+		if (available_bl_slots < 0) {
+			CAM_ERR(CAM_CDM, "Invalid available slots %d:%d:%d",
+				available_bl_slots, CAM_CDM_HWFIFO_SIZE,
+				pending_bl);
+			break;
+		}
+		if (bl_count < (available_bl_slots - 1)) {
+			CAM_DBG(CAM_CDM,
+				"BL slot available_cnt=%d requested=%d",
+				(available_bl_slots - 1), bl_count);
+			rc = bl_count;
+			break;
+		} else if (0 == (available_bl_slots - 1)) {
+			rc = cam_hw_cdm_enable_bl_done_irq(cdm_hw, true);
+			if (rc) {
+				CAM_ERR(CAM_CDM, "Enable BL done irq failed");
+				break;
+			}
+			time_left = wait_for_completion_timeout(
+				&core->bl_complete, msecs_to_jiffies(
+				CAM_CDM_BL_FIFO_WAIT_TIMEOUT));
+			if (time_left <= 0) {
+				CAM_ERR(CAM_CDM,
+					"CDM HW BL wait timed out");
+				if (cam_hw_cdm_enable_bl_done_irq(cdm_hw,
+					false))
+					CAM_ERR(CAM_CDM,
+						"Disable BL done irq failed");
+				rc = -EIO;
+				break;
+			}
+			if (cam_hw_cdm_enable_bl_done_irq(cdm_hw, false))
+				CAM_ERR(CAM_CDM, "Disable BL done irq failed");
+			rc = 0;
+			CAM_DBG(CAM_CDM, "CDM HW is ready for data");
+		} else {
+			rc = (bl_count - (available_bl_slots - 1));
+			break;
+		}
+	} while (1);
+
+	return rc;
+}
+
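+/*
+ * Note: like cam_cdm_read_hw_reg()/cam_cdm_write_hw_reg(), the two BL
+ * helpers below return true on failure and false on success.
+ */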
+bool cam_hw_cdm_bl_write(struct cam_hw_info *cdm_hw, uint32_t src,
+	uint32_t len, uint32_t tag)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_BASE_REG, src)) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM base to BL base");
+		return true;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_LEN_REG,
+		((len & 0xFFFFF) | ((tag & 0xFF) << 20)))) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM BL len");
+		return true;
+	}
+	return false;
+}
+
+bool cam_hw_cdm_commit_bl_write(struct cam_hw_info *cdm_hw)
+{
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_BL_FIFO_STORE_REG, 1)) {
+		CAM_ERR(CAM_CDM, "Failed to write CDM commit BL");
+		return true;
+	}
+	return false;
+}
+
+int cam_hw_cdm_submit_gen_irq(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req)
+{
+	struct cam_cdm_bl_cb_request_entry *node;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t len;
+	int rc;
+
+	if (core->bl_tag > 63) {
+		CAM_ERR(CAM_CDM, "bl_tag invalid =%d", core->bl_tag);
+		rc = -EINVAL;
+		goto end;
+	}
+	CAM_DBG(CAM_CDM, "CDM write BL last cmd tag=%x total=%d cookie=%d",
+		core->bl_tag, req->data->cmd_arrary_count, req->data->cookie);
+	node = kzalloc(sizeof(struct cam_cdm_bl_cb_request_entry),
+			GFP_KERNEL);
+	if (!node) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+	node->client_hdl = req->handle;
+	node->cookie = req->data->cookie;
+	node->bl_tag = core->bl_tag;
+	node->userdata = req->data->userdata;
+	list_add_tail(&node->entry, &core->bl_request_list);
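+	/* Each bl_tag owns one genirq command slot in the shared buffer */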
+	len = core->ops->cdm_required_size_genirq() * core->bl_tag;
+	core->ops->cdm_write_genirq(((uint32_t *)core->gen_irq.kmdvaddr + len),
+		core->bl_tag);
+	rc = cam_hw_cdm_bl_write(cdm_hw, (core->gen_irq.vaddr + (4*len)),
+		((4 * core->ops->cdm_required_size_genirq()) - 1),
+		core->bl_tag);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CDM hw bl write failed for gen irq bltag=%d",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -EIO;
+		goto end;
+	}
+
+	if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+		CAM_ERR(CAM_CDM, "Cannot commit the genirq BL with tag tag=%d",
+			core->bl_tag);
+		list_del_init(&node->entry);
+		kfree(node);
+		rc = -EIO;
+	}
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+	uint32_t pending_bl = 0;
+	int write_count = 0;
+
+	if (req->data->cmd_arrary_count > CAM_CDM_HWFIFO_SIZE) {
+		CAM_INFO(CAM_CDM,
+			"requested BL count exceeds FIFO size, cnt=%d max=%d",
+			req->data->cmd_arrary_count, CAM_CDM_HWFIFO_SIZE);
+	}
+
+	if (atomic_read(&core->error))
+		return -EIO;
+
+	mutex_lock(&cdm_hw->hw_mutex);
+	mutex_lock(&client->lock);
+	rc = cam_hw_cdm_bl_fifo_pending_bl_rb(cdm_hw, &pending_bl);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Cannot read the current BL depth");
+		mutex_unlock(&client->lock);
+		mutex_unlock(&cdm_hw->hw_mutex);
+		return rc;
+	}
+
+	for (i = 0; i < req->data->cmd_arrary_count; i++) {
+		uint64_t hw_vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			CAM_ERR(CAM_CDM,
+				"cmd len(%d) is invalid cnt=%d total cnt=%d",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+		if (atomic_read(&core->error)) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"In error state cnt=%d total cnt=%d\n",
+				i, req->data->cmd_arrary_count);
+			rc = -EIO;
+			break;
+		}
+		if (write_count == 0) {
+			write_count = cam_hw_cdm_wait_for_bl_fifo(cdm_hw,
+				(req->data->cmd_arrary_count - i));
+			if (write_count < 0) {
+				CAM_ERR(CAM_CDM,
+					"wait for bl fifo failed %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EIO;
+				break;
+			}
+		} else {
+			write_count--;
+		}
+
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_io_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				core->iommu_hdl.non_secure, &hw_vaddr_ptr,
+				&len);
+		} else if (req->data->type == CAM_CDM_BL_CMD_TYPE_HW_IOVA) {
+			if (!cdm_cmd->cmd[i].bl_addr.hw_iova) {
+				CAM_ERR(CAM_CDM,
+					"Hw bl hw_iova is invalid %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EINVAL;
+				break;
+			}
+			rc = 0;
+			hw_vaddr_ptr =
+				(uint64_t)cdm_cmd->cmd[i].bl_addr.hw_iova;
+			len = cdm_cmd->cmd[i].len + cdm_cmd->cmd[i].offset;
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Only mem hdl/hw va type is supported %d",
+				req->data->type);
+			rc = -EINVAL;
+			break;
+		}
+
+		if ((!rc) && (hw_vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CAM_DBG(CAM_CDM, "Got the HW VA");
+			if (core->bl_tag >= (CAM_CDM_HWFIFO_SIZE - 1))
+				core->bl_tag = 0;
+			rc = cam_hw_cdm_bl_write(cdm_hw,
+				((uint32_t)hw_vaddr_ptr +
+					cdm_cmd->cmd[i].offset),
+				(cdm_cmd->cmd[i].len - 1), core->bl_tag);
+			if (rc) {
+				CAM_ERR(CAM_CDM, "Hw bl write failed %d:%d",
+					i, req->data->cmd_arrary_count);
+				rc = -EIO;
+				break;
+			}
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for hdl=%x len=%zu:%d",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			CAM_ERR(CAM_CDM, "Sanity check failed for %d:%d",
+				i, req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+
+		if (!rc) {
+			CAM_DBG(CAM_CDM,
+				"write BL success for cnt=%d with tag=%d",
+				i, core->bl_tag);
+
+			CAM_DBG(CAM_CDM, "Now commit the BL");
+			if (cam_hw_cdm_commit_bl_write(cdm_hw)) {
+				CAM_ERR(CAM_CDM,
+					"Cannot commit the BL %d tag=%d",
+					i, core->bl_tag);
+				rc = -EIO;
+				break;
+			}
+			CAM_DBG(CAM_CDM, "BL commit success BL %d tag=%d", i,
+				core->bl_tag);
+			core->bl_tag++;
+			if ((req->data->flag == true) &&
+				(i == (req->data->cmd_arrary_count - 1))) {
+				rc = cam_hw_cdm_submit_gen_irq(cdm_hw, req);
+				if (rc == 0)
+					core->bl_tag++;
+			}
+		}
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	return rc;
+}
+
+static void cam_hw_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+
+		CAM_DBG(CAM_CDM, "IRQ status=%x", payload->irq_status);
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CAM_DBG(CAM_CDM, "inline IRQ data=%x",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+					payload->irq_data,
+					&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+						CAM_HW_CDM_BL_CB_INTERNAL) {
+					CAM_ERR(CAM_CDM,
+						"Invalid node=%pK %d", node,
+						node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				CAM_ERR(CAM_CDM,
+					"Inval node, inline_irq st=%x data=%x",
+					payload->irq_status, payload->irq_data);
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK) {
+			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
+			complete(&core->reset_complete);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK) {
+			if (atomic_read(&core->bl_done)) {
+				CAM_DBG(CAM_CDM, "CDM HW BL done IRQ");
+				complete(&core->bl_complete);
+			}
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid command IRQ, Need HW reset\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "AHB Error IRQ\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Overflow Error IRQ\n");
+			atomic_inc(&core->error);
+			cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+			atomic_dec(&core->error);
+		}
+		kfree(payload);
+	} else {
+		CAM_ERR(CAM_CDM, "NULL payload");
+	}
+}
+
+static void cam_hw_cdm_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token,
+	uint32_t buf_info)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_cdm *core = NULL;
+
+	if (token) {
+		cdm_hw = (struct cam_hw_info *)token;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		atomic_inc(&core->error);
+		cam_hw_cdm_dump_core_debug_registers(cdm_hw);
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "Page fault iova addr %pK\n",
+			(void *)iova);
+		cam_cdm_notify_clients(cdm_hw, CAM_CDM_CB_STATUS_PAGEFAULT,
+			(void *)iova);
+		atomic_dec(&core->error);
+	} else {
+		CAM_ERR(CAM_CDM, "Invalid token");
+	}
+}
+
+irqreturn_t cam_hw_cdm_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cdm_hw = data;
+	struct cam_cdm *cdm_core = cdm_hw->core_info;
+	struct cam_cdm_work_payload *payload;
+	bool work_status;
+
+	CAM_DBG(CAM_CDM, "Got irq");
+	payload = kzalloc(sizeof(struct cam_cdm_work_payload), GFP_ATOMIC);
+	if (payload) {
+		if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_STATUS,
+				&payload->irq_status)) {
+			CAM_ERR(CAM_CDM, "Failed to read CDM HW IRQ status");
+		}
+		if (!payload->irq_status) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid irq received\n");
+			kfree(payload);
+			return IRQ_HANDLED;
+		}
+		if (payload->irq_status &
+			CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK) {
+			if (cam_cdm_read_hw_reg(cdm_hw, CDM_IRQ_USR_DATA,
+				&payload->irq_data)) {
+				CAM_ERR(CAM_CDM,
+					"Failed to read CDM HW IRQ data");
+			}
+		}
+		CAM_DBG(CAM_CDM, "Got payload=%d", payload->irq_status);
+		payload->hw = cdm_hw;
+		INIT_WORK((struct work_struct *)&payload->work,
+			cam_hw_cdm_work);
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR,
+			payload->irq_status))
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ Clear");
+		if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_CLEAR_CMD, 0x01))
+			CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ cmd");
+		work_status = queue_work(cdm_core->work_queue, &payload->work);
+		if (work_status == false) {
+			CAM_ERR(CAM_CDM, "Failed to queue work for irq=%x",
+				payload->irq_status);
+			kfree(payload);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_hw_cdm_alloc_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_mem_mgr_request_desc genirq_alloc_cmd;
+	struct cam_mem_mgr_memory_desc genirq_alloc_out;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_alloc_cmd.align = 0;
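+	/* One 2-dword GenerateIRQ command (8 bytes) per BL FIFO slot */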
+	genirq_alloc_cmd.size = (8 * CAM_CDM_HWFIFO_SIZE);
+	genirq_alloc_cmd.smmu_hdl = cdm_core->iommu_hdl.non_secure;
+	genirq_alloc_cmd.flags = CAM_MEM_FLAG_HW_READ_WRITE;
+	rc = cam_mem_mgr_request_mem(&genirq_alloc_cmd,
+		&genirq_alloc_out);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get genirq cmd space rc=%d", rc);
+		goto end;
+	}
+	cdm_core->gen_irq.handle = genirq_alloc_out.mem_handle;
+	cdm_core->gen_irq.vaddr = (genirq_alloc_out.iova & 0xFFFFFFFF);
+	cdm_core->gen_irq.kmdvaddr = genirq_alloc_out.kva;
+	cdm_core->gen_irq.size = genirq_alloc_out.len;
+
+end:
+	return rc;
+}
+
+int cam_hw_cdm_release_genirq_mem(void *hw_priv)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_mem_mgr_memory_desc genirq_release_cmd;
+	int rc = -EINVAL;
+
+	if (!hw_priv)
+		return rc;
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	genirq_release_cmd.mem_handle = cdm_core->gen_irq.handle;
+	rc = cam_mem_mgr_release_mem(&genirq_release_cmd);
+	if (rc)
+		CAM_ERR(CAM_CDM, "Failed to put genirq cmd space for hw");
+
+	return rc;
+}
+
+int cam_hw_cdm_init(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc;
+	long time_left;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Enable platform failed");
+		goto end;
+	}
+
+	CAM_DBG(CAM_CDM, "Enable soc done");
+
+	/* Before triggering the reset to HW, clear the reset complete */
+	atomic_set(&cdm_core->error, 0);
+	atomic_set(&cdm_core->bl_done, 0);
+	reinit_completion(&cdm_core->reset_complete);
+	reinit_completion(&cdm_core->bl_complete);
+
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW IRQ mask");
+		goto disable_return;
+	}
+	if (cam_cdm_write_hw_reg(cdm_hw, CDM_CFG_RST_CMD, 0x9)) {
+		CAM_ERR(CAM_CDM, "Failed to Write CDM HW reset");
+		goto disable_return;
+	}
+
+	CAM_DBG(CAM_CDM, "Waiting for CDM HW resetdone");
+	time_left = wait_for_completion_timeout(&cdm_core->reset_complete,
+		msecs_to_jiffies(CAM_CDM_HW_RESET_TIMEOUT));
+
+	if (time_left <= 0) {
+		CAM_ERR(CAM_CDM, "CDM HW reset wait timed out");
+		goto disable_return;
+	} else {
+		CAM_DBG(CAM_CDM, "CDM Init success");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_UP;
+		cam_cdm_write_hw_reg(cdm_hw, CDM_IRQ_MASK, 0x70003);
+		rc = 0;
+		goto end;
+	}
+
+disable_return:
+	rc = -EIO;
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+end:
+	return rc;
+}
+
+int cam_hw_cdm_deinit(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cdm_hw = hw_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = 0;
+
+	if (!hw_priv)
+		return -EINVAL;
+
+	soc_info = &cdm_hw->soc_info;
+	cdm_core = cdm_hw->core_info;
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "disable platform failed");
+	} else {
+		CAM_DBG(CAM_CDM, "CDM Deinit success");
+		cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	struct cam_cpas_register_params cpas_parms;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw->soc_info.dev = &pdev->dev;
+	cdm_hw->soc_info.dev_name = pdev->name;
+	cdm_hw_intf->hw_type = CAM_HW_CDM;
+	cdm_hw->open_count = 0;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+
+	rc = cam_hw_cdm_soc_get_dt_properties(cdm_hw, msm_cam_hw_cdm_dt_match);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
+		goto release_mem;
+	}
+	cdm_hw_intf->hw_idx = cdm_hw->soc_info.index;
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+		cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	cdm_core->id = cam_hw_cdm_get_id_by_name(cdm_core->name);
+	if (cdm_core->id >= CAM_CDM_MAX) {
+		CAM_ERR(CAM_CDM, "Failed to get CDM HW name for %s",
+			cdm_core->name);
+		goto release_private_mem;
+	}
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	init_completion(&cdm_core->bl_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = cam_hw_cdm_init;
+	cdm_hw_intf->hw_ops.deinit = cam_hw_cdm_deinit;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+	mutex_lock(&cdm_hw->hw_mutex);
+
+	CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	rc = cam_smmu_get_handle("cpas-cdm0", &cdm_core->iommu_hdl.non_secure);
+	if (rc < 0) {
+		CAM_ERR(CAM_CDM, "cpas-cdm get iommu handle failed");
+		goto unlock_release_mem;
+	}
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		cam_hw_cdm_iommu_fault_handler, cdm_hw);
+
+	rc = cam_smmu_ops(cdm_core->iommu_hdl.non_secure, CAM_SMMU_ATTACH);
+	if (rc < 0) {
+		CAM_ERR(CAM_CDM, "Attach iommu non secure handle failed");
+		goto destroy_non_secure_hdl;
+	}
+	cdm_core->iommu_hdl.secure = -1;
+
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	if (!cdm_core->work_queue) {
+		CAM_ERR(CAM_CDM, "Workqueue allocation failed");
+		rc = -ENOMEM;
+		goto destroy_non_secure_hdl;
+	}
+
+	rc = cam_soc_util_request_platform_resource(&cdm_hw->soc_info,
+			cam_hw_cdm_irq, cdm_hw);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to request platform resource");
+		goto destroy_non_secure_hdl;
+	}
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cpas-cdm", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
+		goto release_platform_resource;
+	}
+	CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	rc = cam_cpas_start(cdm_core->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS start failed");
+		goto cpas_unregister;
+	}
+
+	rc = cam_hw_cdm_init(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to Init CDM HW");
+		goto cpas_stop;
+	}
+	cdm_hw->open_count++;
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_HW_VERSION,
+		&cdm_core->hw_version)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM HW Version");
+		goto deinit;
+	}
+
+	if (cam_cdm_read_hw_reg(cdm_hw, CDM_CFG_TITAN_VERSION,
+		&cdm_core->hw_family_version)) {
+		CAM_ERR(CAM_CDM, "Failed to read CDM family Version");
+		goto deinit;
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Hw version read success family =%x hw =%x",
+		cdm_core->hw_family_version, cdm_core->hw_version);
+	cdm_core->ops = cam_cdm_get_ops(cdm_core->hw_version, NULL,
+		false);
+	if (!cdm_core->ops) {
+		CAM_ERR(CAM_CDM, "Failed to util ops for hw");
+		goto deinit;
+	}
+
+	if (!cam_cdm_set_cam_hw_version(cdm_core->hw_version,
+		&cdm_core->version)) {
+		CAM_ERR(CAM_CDM, "Failed to set cam he version for hw");
+		goto deinit;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to Deinit CDM HW");
+		cdm_hw->open_count--;
+		goto cpas_stop;
+	}
+
+	rc = cam_cpas_stop(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
+		cdm_hw->open_count--;
+		goto cpas_unregister;
+	}
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+		soc_private, CAM_HW_CDM, &cdm_core->index);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "HW CDM Interface registration failed");
+		cdm_hw->open_count--;
+		goto cpas_unregister;
+	}
+	cdm_hw->open_count--;
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
+
+	return rc;
+
+deinit:
+	if (cam_hw_cdm_deinit(cdm_hw, NULL, 0))
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
+	cdm_hw->open_count--;
+cpas_stop:
+	if (cam_cpas_stop(cdm_core->cpas_handle))
+		CAM_ERR(CAM_CDM, "CPAS stop failed");
+cpas_unregister:
+	if (cam_cpas_unregister_client(cdm_core->cpas_handle))
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+release_platform_resource:
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+destroy_non_secure_hdl:
+	cam_smmu_set_client_page_fault_handler(cdm_core->iommu_hdl.non_secure,
+		NULL, cdm_hw);
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
+unlock_release_mem:
+	mutex_unlock(&cdm_hw->hw_mutex);
+release_private_mem:
+	kfree(cdm_hw->soc_info.soc_private);
+release_mem:
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	return rc;
+}
+
+int cam_hw_cdm_remove(struct platform_device *pdev)
+{
+	int rc = -EBUSY;
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		CAM_ERR(CAM_CDM, "Failed to get dev private data");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw private data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get hw core data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	if (cdm_hw->open_count != 0) {
+		CAM_ERR(CAM_CDM, "Hw open count invalid type=%d idx=%d cnt=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx,
+			cdm_hw->open_count);
+		return rc;
+	}
+
+	rc = cam_hw_cdm_deinit(cdm_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Deinit failed for hw");
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+		return rc;
+	}
+
+	if (cam_soc_util_release_platform_resource(&cdm_hw->soc_info))
+		CAM_ERR(CAM_CDM, "Release platform resource failed");
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+
+	if (cam_smmu_destroy_handle(cdm_core->iommu_hdl.non_secure))
+		CAM_ERR(CAM_CDM, "Release iommu secure hdl failed");
+	cam_smmu_unset_client_page_fault_handler(
+		cdm_core->iommu_hdl.non_secure, cdm_hw);
+
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw_intf);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+
+	return 0;
+}
+
+static struct platform_driver cam_hw_cdm_driver = {
+	.probe = cam_hw_cdm_probe,
+	.remove = cam_hw_cdm_remove,
+	.driver = {
+		.name = "msm_cam_cdm",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_hw_cdm_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_hw_cdm_init_module(void)
+{
+	return platform_driver_register(&cam_hw_cdm_driver);
+}
+
+static void __exit cam_hw_cdm_exit_module(void)
+{
+	platform_driver_unregister(&cam_hw_cdm_driver);
+}
+
+module_init(cam_hw_cdm_init_module);
+module_exit(cam_hw_cdm_exit_module);
+MODULE_DESCRIPTION("MSM Camera HW CDM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
new file mode 100644
index 0000000..4bda174
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_virtual.h"
+#include "cam_soc_util.h"
+#include "cam_cdm_soc.h"
+
+static struct cam_cdm_intf_mgr cdm_mgr;
+static DEFINE_MUTEX(cam_cdm_mgr_lock);
+
+static const struct of_device_id msm_cam_cdm_intf_dt_match[] = {
+	{ .compatible = "qcom,cam-cdm-intf", },
+	{}
+};
+
+static int get_cdm_mgr_refcount(void)
+{
+	int rc = 0;
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+		rc = -EPERM;
+	} else {
+		CAM_DBG(CAM_CDM, "CDM intf mgr get refcount=%d",
+			cdm_mgr.refcount);
+		cdm_mgr.refcount++;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static void put_cdm_mgr_refcount(void)
+{
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.probe_done == false) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr not probed yet");
+	} else {
+		CAM_DBG(CAM_CDM, "CDM intf mgr put refcount=%d",
+			cdm_mgr.refcount);
+		if (cdm_mgr.refcount > 0) {
+			cdm_mgr.refcount--;
+		} else {
+			CAM_ERR(CAM_CDM, "Refcount put when zero");
+			WARN_ON(1);
+		}
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+}
+
+static int get_cdm_iommu_handle(struct cam_iommu_handle *cdm_handles,
+	uint32_t hw_idx)
+{
+	int rc = -EPERM;
+	struct cam_hw_intf *hw = cdm_mgr.nodes[hw_idx].device;
+
+	if (hw->hw_ops.get_hw_caps) {
+		rc = hw->hw_ops.get_hw_caps(hw->hw_priv, cdm_handles,
+			sizeof(struct cam_iommu_handle));
+	}
+
+	return rc;
+}
+
+static int get_cdm_index_by_id(char *identifier,
+	uint32_t cell_index, uint32_t *hw_index)
+{
+	int rc = -EPERM, i, j;
+	char client_name[128];
+
+	CAM_DBG(CAM_CDM, "Looking for HW id of =%s and index=%d",
+		identifier, cell_index);
+	snprintf(client_name, sizeof(client_name), "%s", identifier);
+	CAM_DBG(CAM_CDM, "Looking for HW id of %s count:%d", client_name,
+		cdm_mgr.cdm_count);
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		CAM_DBG(CAM_CDM, "dt_num_supported_clients=%d",
+			cdm_mgr.nodes[i].data->dt_num_supported_clients);
+
+		for (j = 0; j <
+			cdm_mgr.nodes[i].data->dt_num_supported_clients; j++) {
+			CAM_DBG(CAM_CDM, "client name:%s",
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j]);
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				client_name)) {
+				rc = 0;
+				*hw_index = i;
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+
+	return rc;
+}
+
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles)
+{
+	int i, j, rc = -EPERM;
+
+	if ((!identifier) || (!cdm_handles))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+	CAM_DBG(CAM_CDM, "Looking for Iommu handle of %s", identifier);
+
+	for (i = 0; i < cdm_mgr.cdm_count; i++) {
+		mutex_lock(&cdm_mgr.nodes[i].lock);
+		if (!cdm_mgr.nodes[i].data) {
+			mutex_unlock(&cdm_mgr.nodes[i].lock);
+			continue;
+		}
+		for (j = 0; j <
+			 cdm_mgr.nodes[i].data->dt_num_supported_clients;
+			j++) {
+			if (!strcmp(
+				cdm_mgr.nodes[i].data->dt_cdm_client_name[j],
+				identifier)) {
+				rc = get_cdm_iommu_handle(cdm_handles, i);
+				break;
+			}
+		}
+		mutex_unlock(&cdm_mgr.nodes[i].lock);
+		if (rc == 0)
+			break;
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_get_iommu_handle);
+
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data)
+{
+	int rc = -EPERM;
+	struct cam_hw_intf *hw;
+	uint32_t hw_index = 0;
+
+	if ((!data) || (!data->base_array_cnt))
+		return -EINVAL;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	if (data->id > CAM_CDM_HW_ANY) {
+		CAM_ERR(CAM_CDM,
+			"only CAM_CDM_VIRTUAL/CAM_CDM_HW_ANY is supported");
+		rc = -EPERM;
+		goto end;
+	}
+	rc = get_cdm_index_by_id(data->identifier, data->cell_index,
+		&hw_index);
+	if ((rc < 0) || (hw_index >= CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
+		CAM_ERR(CAM_CDM, "Failed to identify associated hw id");
+		goto end;
+	} else {
+		CAM_DBG(CAM_CDM, "hw_index:%d", hw_index);
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_ACQUIRE, data,
+					sizeof(struct cam_cdm_acquire_data));
+			if (rc < 0) {
+				CAM_ERR(CAM_CDM, "CDM hw acquire failed");
+				goto end;
+			}
+		} else {
+			CAM_ERR(CAM_CDM, "idx %d doesn't have acquire ops",
+				hw_index);
+			rc = -EPERM;
+		}
+	}
+end:
+	if (rc < 0) {
+		CAM_ERR(CAM_CDM, "CDM acquire failed for id=%d name=%s, idx=%d",
+			data->id, data->identifier, data->cell_index);
+		put_cdm_mgr_refcount();
+	}
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_acquire);
+
+int cam_cdm_release(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EPERM;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RELEASE, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw release failed for handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have release ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
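+	/*
+	 * A successful release drops two references: the one taken by
+	 * this call and the one left held since cam_cdm_acquire().
+	 */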
+	if (rc == 0)
+		put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_release);
+
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (!data)
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		struct cam_cdm_hw_intf_cmd_submit_bl req;
+
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			req.data = data;
+			req.handle = handle;
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+				CAM_CDM_HW_INTF_CMD_SUBMIT_BL, &req,
+				sizeof(struct cam_cdm_hw_intf_cmd_submit_bl));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"hw submit bl failed for handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have submit ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_submit_bls);
+
+int cam_cdm_stream_on(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.start) {
+			rc = hw->hw_ops.start(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM, "hw start failed handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have start ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_on);
+
+int cam_cdm_stream_off(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.stop) {
+			rc = hw->hw_ops.stop(hw->hw_priv, &handle,
+					sizeof(uint32_t));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM, "hw stop failed handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have stop ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_stream_off);
+
+int cam_cdm_reset_hw(uint32_t handle)
+{
+	uint32_t hw_index;
+	int rc = -EINVAL;
+	struct cam_hw_intf *hw;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	hw_index = CAM_CDM_GET_HW_IDX(handle);
+	if (hw_index < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM) {
+		hw = cdm_mgr.nodes[hw_index].device;
+		if (hw && hw->hw_ops.process_cmd) {
+			rc = hw->hw_ops.process_cmd(hw->hw_priv,
+					CAM_CDM_HW_INTF_CMD_RESET_HW, &handle,
+					sizeof(handle));
+			if (rc < 0)
+				CAM_ERR(CAM_CDM,
+					"CDM hw reset failed for handle=%x",
+					handle);
+		} else {
+			CAM_ERR(CAM_CDM, "hw idx %d doesn't have reset ops",
+				hw_index);
+		}
+	}
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cdm_reset_hw);
+
+int cam_cdm_intf_register_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t *index)
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data) || (!index))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(!cdm_mgr.nodes[CAM_SW_CDM_INDEX].device)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = hw;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) && (cdm_mgr.cdm_count > 0) &&
+		(cdm_mgr.cdm_count < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM)) {
+		mutex_lock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.nodes[cdm_mgr.cdm_count].device = hw;
+		cdm_mgr.nodes[cdm_mgr.cdm_count].data = data;
+		*index = cdm_mgr.cdm_count;
+		mutex_unlock(&cdm_mgr.nodes[cdm_mgr.cdm_count].lock);
+		cdm_mgr.cdm_count++;
+		rc = 0;
+	} else {
+		CAM_ERR(CAM_CDM, "CDM registration failed type=%d count=%d",
+			type, cdm_mgr.cdm_count);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+int cam_cdm_intf_deregister_hw_cdm(struct cam_hw_intf *hw,
+	struct cam_cdm_private_dt_data *data, enum cam_cdm_type type,
+	uint32_t index)
+{
+	int rc = -EINVAL;
+
+	if ((!hw) || (!data))
+		return rc;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		rc = -EPERM;
+		return rc;
+	}
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if ((type == CAM_VIRTUAL_CDM) &&
+		(hw == cdm_mgr.nodes[CAM_SW_CDM_INDEX].device) &&
+		(index == CAM_SW_CDM_INDEX)) {
+		mutex_lock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].device = NULL;
+		cdm_mgr.nodes[CAM_SW_CDM_INDEX].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[CAM_SW_CDM_INDEX].lock);
+		rc = 0;
+	} else if ((type == CAM_HW_CDM) &&
+		(hw == cdm_mgr.nodes[index].device)) {
+		mutex_lock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.nodes[index].device = NULL;
+		cdm_mgr.nodes[index].data = NULL;
+		mutex_unlock(&cdm_mgr.nodes[index].lock);
+		cdm_mgr.cdm_count--;
+		rc = 0;
+	} else {
+		CAM_ERR(CAM_CDM, "CDM Deregistration failed type=%d index=%d",
+			type, index);
+	}
+	mutex_unlock(&cam_cdm_mgr_lock);
+	put_cdm_mgr_refcount();
+
+	return rc;
+}
+
+static int cam_cdm_intf_probe(struct platform_device *pdev)
+{
+	int i, rc;
+
+	rc = cam_cdm_intf_mgr_soc_get_dt_properties(pdev, &cdm_mgr);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to get dt properties");
+		return rc;
+	}
+	mutex_lock(&cam_cdm_mgr_lock);
+	for (i = 0; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		mutex_init(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = true;
+	cdm_mgr.refcount = 0;
+	mutex_unlock(&cam_cdm_mgr_lock);
+	rc = cam_virtual_cdm_probe(pdev);
+	if (rc) {
+		mutex_lock(&cam_cdm_mgr_lock);
+		cdm_mgr.probe_done = false;
+		for (i = 0; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+			if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+				(cdm_mgr.nodes[i].refcount != 0))
+				CAM_ERR(CAM_CDM,
+					"Valid node present in index=%d", i);
+			mutex_destroy(&cdm_mgr.nodes[i].lock);
+			cdm_mgr.nodes[i].device = NULL;
+			cdm_mgr.nodes[i].data = NULL;
+			cdm_mgr.nodes[i].refcount = 0;
+		}
+		mutex_unlock(&cam_cdm_mgr_lock);
+	}
+
+	CAM_DBG(CAM_CDM, "CDM Intf probe done");
+
+	return rc;
+}
+
+static int cam_cdm_intf_remove(struct platform_device *pdev)
+{
+	int i, rc = -EBUSY;
+
+	if (get_cdm_mgr_refcount()) {
+		CAM_ERR(CAM_CDM, "CDM intf mgr get refcount failed");
+		return rc;
+	}
+
+	if (cam_virtual_cdm_remove(pdev)) {
+		CAM_ERR(CAM_CDM, "Virtual CDM remove failed");
+		put_cdm_mgr_refcount();
+		return rc;
+	}
+	put_cdm_mgr_refcount();
+
+	mutex_lock(&cam_cdm_mgr_lock);
+	if (cdm_mgr.refcount != 0) {
+		CAM_ERR(CAM_CDM, "cdm manager refcount not zero %d",
+			cdm_mgr.refcount);
+		goto end;
+	}
+
+	for (i = 0; i < CAM_CDM_INTF_MGR_MAX_SUPPORTED_CDM; i++) {
+		if (cdm_mgr.nodes[i].device || cdm_mgr.nodes[i].data ||
+			(cdm_mgr.nodes[i].refcount != 0)) {
+			CAM_ERR(CAM_CDM, "Valid node present in index=%d", i);
+			mutex_unlock(&cam_cdm_mgr_lock);
+			goto end;
+		}
+		mutex_destroy(&cdm_mgr.nodes[i].lock);
+		cdm_mgr.nodes[i].device = NULL;
+		cdm_mgr.nodes[i].data = NULL;
+		cdm_mgr.nodes[i].refcount = 0;
+	}
+	cdm_mgr.probe_done = false;
+	rc = 0;
+
+end:
+	mutex_unlock(&cam_cdm_mgr_lock);
+	return rc;
+}
+
+static struct platform_driver cam_cdm_intf_driver = {
+	.probe = cam_cdm_intf_probe,
+	.remove = cam_cdm_intf_remove,
+	.driver = {
+		.name = "msm_cam_cdm_intf",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_cdm_intf_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_cdm_intf_init_module(void)
+{
+	return platform_driver_register(&cam_cdm_intf_driver);
+}
+
+static void __exit cam_cdm_intf_exit_module(void)
+{
+	platform_driver_unregister(&cam_cdm_intf_driver);
+}
+
+module_init(cam_cdm_intf_init_module);
+module_exit(cam_cdm_intf_exit_module);
+MODULE_DESCRIPTION("MSM Camera CDM Intf driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
new file mode 100644
index 0000000..1f840fe
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_intf_api.h
@@ -0,0 +1,202 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_API_H_
+#define _CAM_CDM_API_H_
+
+#include <media/cam_defs.h>
+#include "cam_cdm_util.h"
+#include "cam_soc_util.h"
+
+/* enum cam_cdm_id - Enum for possible CAM CDM hardware types */
+enum cam_cdm_id {
+	CAM_CDM_VIRTUAL,
+	CAM_CDM_HW_ANY,
+	CAM_CDM_CPAS_0,
+	CAM_CDM_IPE0,
+	CAM_CDM_IPE1,
+	CAM_CDM_BPS,
+	CAM_CDM_VFE,
+	CAM_CDM_MAX
+};
+
+/* enum cam_cdm_cb_status - Enum for possible CAM CDM callback */
+enum cam_cdm_cb_status {
+	CAM_CDM_CB_STATUS_BL_SUCCESS,
+	CAM_CDM_CB_STATUS_INVALID_BL_CMD,
+	CAM_CDM_CB_STATUS_PAGEFAULT,
+	CAM_CDM_CB_STATUS_HW_RESET_ONGOING,
+	CAM_CDM_CB_STATUS_HW_RESET_DONE,
+	CAM_CDM_CB_STATUS_UNKNOWN_ERROR,
+};
+
+/* enum cam_cdm_bl_cmd_addr_type - Enum for possible CDM bl cmd addr types */
+enum cam_cdm_bl_cmd_addr_type {
+	CAM_CDM_BL_CMD_TYPE_MEM_HANDLE,
+	CAM_CDM_BL_CMD_TYPE_HW_IOVA,
+	CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA,
+};
+
+/**
+ * struct cam_cdm_acquire_data - Cam CDM acquire data structure
+ *
+ * @identifier : Input identifier string which is the device label from dt
+ *                    like vfe, ife, jpeg etc
+ * @cell_index : Input integer identifier pointing to the cell index from dt
+ *                     of the device. This can be used to form a unique string
+ *                     with @identifier like vfe0, ife1, jpeg0 etc
+ * @id : ID of a specific or any CDM HW which needs to be acquired.
+ * @userdata : Input private data which will be returned as part
+ *             of callback.
+ * @cam_cdm_callback : Input callback pointer for triggering the
+ *                     callbacks from CDM driver
+ *                     @handle : CDM Client handle
+ *                     @userdata : Private data given at the time of acquire
+ *                     @status : Callback status
+ *                     @cookie : Cookie if the callback is gen irq status
+ * @base_array_cnt : Input number of ioremapped address pairs in
+ *                   @base_array, needed only if the selected CDM is virtual.
+ * @base_array : Input pointer to an array of ioremapped address pairs,
+ *               needed only if the selected CDM is virtual.
+ * @cdm_version : Output when acquiring a HW CDM and input when acquiring
+ *                a virtual CDM; currently fixed to one version below the
+ *                acquire API.
+ * @ops : Output pointer updated by cdm driver to the CDM
+ *                     util ops for this HW version of CDM acquired.
+ * @handle  : Output Unique handle generated for this acquire
+ *
+ */
+struct cam_cdm_acquire_data {
+	char identifier[128];
+	uint32_t cell_index;
+	enum cam_cdm_id id;
+	void *userdata;
+	void (*cam_cdm_callback)(uint32_t handle, void *userdata,
+		enum cam_cdm_cb_status status, uint64_t cookie);
+	uint32_t base_array_cnt;
+	struct cam_soc_reg_map *base_array[CAM_SOC_MAX_BLOCK];
+	struct cam_hw_version cdm_version;
+	struct cam_cdm_utils_ops *ops;
+	uint32_t handle;
+};
+
+/**
+ * struct cam_cdm_bl_cmd - Cam CDM HW bl command
+ *
+ * @bl_addr : Union of all three type for CDM BL commands
+ * @mem_handle : Input mem handle of bl cmd
+ * @offset : Input offset of the actual bl cmd in the memory pointed
+ *           by mem_handle
+ * @len : Input length of the BL command; cannot be more than 1MB and
+ *           will be validated against offset+size of the memory pointed
+ *           to by @mem_handle
+ *
+ */
+struct cam_cdm_bl_cmd {
+	union {
+		int32_t mem_handle;
+		uint32_t *hw_iova;
+		uintptr_t kernel_iova;
+	} bl_addr;
+	uint32_t offset;
+	uint32_t len;
+};
+
+/**
+ * struct cam_cdm_bl_request - Cam CDM HW base & length (BL) request
+ *
+ * @flag : 1 if a callback is needed when this BL request is done,
+ *            0 for no callback
+ * @userdata : Input private data which will be returned as part
+ *             of the callback if requested via @flag.
+ * @cookie : Cookie if the callback is gen irq status
+ * @type : Type of the submitted BL cmd address.
+ * @cmd_arrary_count : Input number of BL commands to be submitted to CDM
+ * @cmd : Input payload holding the array of BL commands to be submitted.
+ *
+ */
+struct cam_cdm_bl_request {
+	int flag;
+	void *userdata;
+	uint64_t cookie;
+	enum cam_cdm_bl_cmd_addr_type type;
+	uint32_t cmd_arrary_count;
+	struct cam_cdm_bl_cmd cmd[1];
+};
+
+/**
+ * @brief : API to get the CDM capabilities for a camera device type
+ *
+ * @identifier : Input pointer to a string which is the device label from dt
+ *                   like vfe, ife, jpeg etc. We do not need the cell index,
+ *                   assuming all devices of a single type map to one SMMU
+ *                   client
+ * @cdm_handles : Input iommu handle memory pointer to update handles
+ *
+ * @return 0 on success
+ */
+int cam_cdm_get_iommu_handle(char *identifier,
+	struct cam_iommu_handle *cdm_handles);
+
+/**
+ * @brief : API to acquire a CDM
+ *
+ * @data : Input data for the CDM to be acquired
+ *
+ * @return 0 on success
+ */
+int cam_cdm_acquire(struct cam_cdm_acquire_data *data);
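+/*
+ * Illustrative usage sketch (the "ife" label, callback and context
+ * pointer are hypothetical examples, not mandated by this API):
+ *
+ *	struct cam_cdm_acquire_data acquire;
+ *
+ *	memset(&acquire, 0, sizeof(acquire));
+ *	strlcpy(acquire.identifier, "ife", sizeof(acquire.identifier));
+ *	acquire.cell_index = 0;
+ *	acquire.id = CAM_CDM_HW_ANY;
+ *	acquire.cam_cdm_callback = my_cdm_callback;
+ *	acquire.userdata = my_ctx;
+ *	rc = cam_cdm_acquire(&acquire);
+ *	if (!rc)
+ *		rc = cam_cdm_stream_on(acquire.handle);
+ *
+ * On success, acquire.handle identifies the client and acquire.ops
+ * exposes the command-buffer packing helpers for the acquired CDM.
+ */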
+
+/**
+ * @brief : API to release a previously acquired CDM
+ *
+ * @handle : Input handle for the CDM to be released
+ *
+ * @return 0 on success
+ */
+int cam_cdm_release(uint32_t handle);
+
+/**
+ * @brief : API to submit the base & length (BL's) for acquired CDM
+ *
+ * @handle : Input cdm handle to which the BLs need to be submitted.
+ * @data   : Input pointer to the BLs to be submitted
+ *
+ * @return 0 on success
+ */
+int cam_cdm_submit_bls(uint32_t handle, struct cam_cdm_bl_request *data);
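+/*
+ * Illustrative sketch of a single-command submission (the handle, mem
+ * handle, length and cookie values are hypothetical):
+ *
+ *	struct cam_cdm_bl_request bl_req;
+ *
+ *	memset(&bl_req, 0, sizeof(bl_req));
+ *	bl_req.flag = 1;
+ *	bl_req.userdata = my_ctx;
+ *	bl_req.cookie = request_id;
+ *	bl_req.type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+ *	bl_req.cmd_arrary_count = 1;
+ *	bl_req.cmd[0].bl_addr.mem_handle = my_mem_handle;
+ *	bl_req.cmd[0].offset = 0;
+ *	bl_req.cmd[0].len = my_cmd_len;
+ *	rc = cam_cdm_submit_bls(handle, &bl_req);
+ *
+ * With flag set to 1, the client callback fires with the given cookie
+ * once this BL completes.
+ */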
+
+/**
+ * @brief : API to stream ON a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be streamed on
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_on(uint32_t handle);
+
+/**
+ * @brief : API to stream OFF a previously acquired CDM,
+ *          during this we turn on/off clocks/power based on active clients.
+ *
+ * @handle : Input handle for the CDM to be streamed off
+ *
+ * @return 0 on success
+ */
+int cam_cdm_stream_off(uint32_t handle);
+
+/**
+ * @brief : API to reset a previously acquired CDM;
+ *          this can only be performed when the CDM is private.
+ *
+ * @handle : Input handle of the CDM to reset
+ *
+ * @return 0 on success
+ */
+int cam_cdm_reset_hw(uint32_t handle);
+
+#endif /* _CAM_CDM_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
new file mode 100644
index 0000000..278424c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_OFFSET_FROM_REG(x, y) ((x)->offsets[y].offset)
+#define CAM_CDM_ATTR_FROM_REG(x, y) ((x)->offsets[y].attribute)
+
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CAM_DBG(CAM_CDM, "E: b=%pK blen=%d reg=%x off=%x", (void __iomem *)base,
+		(int)mem_len, reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl,
+		reg)));
+	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x", (void __iomem *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)));
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "Invalid reg=%d\n", reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+		if (reg_addr >= (base + mem_len)) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Invalid mapped region %d", reg);
+			goto permission_error;
+		}
+		*value = cam_io_r_mb(reg_addr);
+		CAM_DBG(CAM_CDM, "X b=%pK reg=%x off=%x val=%x",
+			(void __iomem *)base, reg,
+			(CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)),
+			*value);
+		return false;
+	}
+permission_error:
+	*value = 0;
+	return true;
+}
+
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value)
+{
+	void __iomem *reg_addr;
+	struct cam_cdm *cdm = (struct cam_cdm *)cdm_hw->core_info;
+	void __iomem *base =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].mem_base;
+	resource_size_t mem_len =
+		cdm_hw->soc_info.reg_map[CAM_HW_CDM_BASE_INDEX].size;
+
+	CAM_DBG(CAM_CDM, "E: b=%pK reg=%x off=%x val=%x", (void __iomem *)base,
+		reg, (CAM_CDM_OFFSET_FROM_REG(cdm->offset_tbl, reg)), value);
+
+	if ((reg > cdm->offset_tbl->offset_max_size) ||
+		(reg > cdm->offset_tbl->last_offset)) {
+		CAM_ERR_RATE_LIMIT(CAM_CDM, "CDM accessing invalid reg=%d\n",
+			reg);
+		goto permission_error;
+	} else {
+		reg_addr = (base + CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg));
+		if (reg_addr >= (base + mem_len)) {
+			CAM_ERR_RATE_LIMIT(CAM_CDM,
+				"Accessing invalid region %d:%d\n",
+				reg, (CAM_CDM_OFFSET_FROM_REG(
+				cdm->offset_tbl, reg)));
+			goto permission_error;
+		}
+		cam_io_w_mb(value, reg_addr);
+		return false;
+	}
+permission_error:
+	return true;
+}
+
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr)
+{
+	int i, rc = -EINVAL;
+
+	ptr->dt_num_supported_clients = of_property_count_strings(
+						pdev->dev.of_node,
+						"cdm-client-names");
+	CAM_DBG(CAM_CDM, "Num supported cdm_client = %d",
+		ptr->dt_num_supported_clients);
+	if (ptr->dt_num_supported_clients >
+		CAM_PER_CDM_MAX_REGISTERED_CLIENTS) {
+		CAM_ERR(CAM_CDM, "Invalid count of client names count=%d",
+			ptr->dt_num_supported_clients);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (ptr->dt_num_supported_clients < 0) {
+		CAM_DBG(CAM_CDM, "No cdm client names found");
+		ptr->dt_num_supported_clients = 0;
+		ptr->dt_cdm_shared = false;
+		return 0;
+	}
+	ptr->dt_cdm_shared = true;
+	for (i = 0; i < ptr->dt_num_supported_clients; i++) {
+		rc = of_property_read_string_index(pdev->dev.of_node,
+			"cdm-client-names", i, &(ptr->dt_cdm_client_name[i]));
+		CAM_DBG(CAM_CDM, "cdm-client-names[%d] = %s",	i,
+			ptr->dt_cdm_client_name[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_CDM, "Reading cdm-client-names failed");
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table)
+{
+	int rc;
+	struct cam_hw_soc_info *soc_ptr;
+	const struct of_device_id *id;
+
+	if (!cdm_hw || (cdm_hw->soc_info.soc_private) ||
+		!(cdm_hw->soc_info.pdev))
+		return -EINVAL;
+
+	soc_ptr = &cdm_hw->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_ptr);
+	if (rc != 0) {
+		CAM_ERR(CAM_CDM, "Failed to retrieve the CDM dt properties");
+	} else {
+		soc_ptr->soc_private = kzalloc(
+				sizeof(struct cam_cdm_private_dt_data),
+				GFP_KERNEL);
+		if (!soc_ptr->soc_private)
+			return -ENOMEM;
+
+		rc = cam_cdm_soc_load_dt_private(soc_ptr->pdev,
+			soc_ptr->soc_private);
+		if (rc != 0) {
+			CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
+			goto error;
+		}
+		id = of_match_node(table, soc_ptr->pdev->dev.of_node);
+		if ((!id) || !(id->data)) {
+			CAM_ERR(CAM_CDM, "Failed to retrieve the CDM id table");
+			goto error;
+		}
+		CAM_DBG(CAM_CDM, "CDM Hw Id compatible =%s", id->compatible);
+		((struct cam_cdm *)cdm_hw->core_info)->offset_tbl =
+			(struct cam_cdm_reg_offset_table *)id->data;
+		strlcpy(((struct cam_cdm *)cdm_hw->core_info)->name,
+			id->compatible,
+			sizeof(((struct cam_cdm *)cdm_hw->core_info)->name));
+	}
+
+	return rc;
+
+error:
+	rc = -EINVAL;
+	kfree(soc_ptr->soc_private);
+	soc_ptr->soc_private = NULL;
+	return rc;
+}
+
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev, struct cam_cdm_intf_mgr *mgr)
+{
+	int rc;
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+		"num-hw-cdm", &mgr->dt_supported_hw_cdm);
+	CAM_DBG(CAM_CDM, "Number of HW cdm supported =%d",
+		mgr->dt_supported_hw_cdm);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
new file mode 100644
index 0000000..3198853
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_soc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_SOC_H_
+#define _CAM_CDM_SOC_H_
+
+int cam_hw_cdm_soc_get_dt_properties(struct cam_hw_info *cdm_hw,
+	const struct of_device_id *table);
+bool cam_cdm_read_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t *value);
+bool cam_cdm_write_hw_reg(struct cam_hw_info *cdm_hw,
+	enum cam_cdm_regs reg, uint32_t value);
+int cam_cdm_intf_mgr_soc_get_dt_properties(
+	struct platform_device *pdev,
+	struct cam_cdm_intf_mgr *mgr);
+int cam_cdm_soc_load_dt_private(struct platform_device *pdev,
+	struct cam_cdm_private_dt_data *ptr);
+
+#endif /* _CAM_CDM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
new file mode 100644
index 0000000..c00a8a3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.c
@@ -0,0 +1,709 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_DWORD 4
+
+#define CAM_CDM_SW_CMD_COUNT    2
+#define CAM_CMD_LENGTH_MASK     0xFFFF
+#define CAM_CDM_COMMAND_OFFSET  24
+#define CAM_CDM_REG_OFFSET_MASK 0x00FFFFFF
+
+#define CAM_CDM_DMI_DATA_HI_OFFSET   8
+#define CAM_CDM_DMI_DATA_LO_OFFSET   12
+
+static unsigned int CDMCmdHeaderSizes[
+	CAM_CDM_CMD_PRIVATE_BASE + CAM_CDM_SW_CMD_COUNT] = {
+	0, /* UNUSED */
+	3, /* DMI */
+	0, /* UNUSED */
+	2, /* RegContinuous */
+	1, /* RegRandom */
+	2, /* BUFFER_INDIRECT */
+	2, /* GenerateIRQ */
+	3, /* WaitForEvent */
+	1, /* ChangeBase */
+	1, /* PERF_CONTROL */
+	3, /* DMI32 */
+	3, /* DMI64 */
+};
+
+/**
+ * struct cdm_regrandom_cmd - Definition for CDM random register command.
+ * @count: Number of register writes
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_regrandom_cmd {
+	unsigned int count    : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_regcontinuous_cmd - Definition for a CDM register range command.
+ * @count: Number of register writes
+ * @reserved0: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Start address of the range of registers
+ * @reserved1: reserved bits
+ */
+struct cdm_regcontinuous_cmd {
+	unsigned int count     : 16;
+	unsigned int reserved0 : 8;
+	unsigned int cmd       : 8;
+	unsigned int offset    : 24;
+	unsigned int reserved1 : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_dmi_cmd - Definition for a CDM DMI command.
+ * @length: Number of bytes in LUT - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr: Address of the LUT in memory
+ * @DMIAddr: Address of the target DMI config register
+ * @DMISel: DMI identifier
+ */
+struct cdm_dmi_cmd {
+	unsigned int length   : 16;
+	unsigned int reserved : 8;
+	unsigned int cmd      : 8;
+	unsigned int addr;
+	unsigned int DMIAddr  : 24;
+	unsigned int DMISel   : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_indirect_cmd - Definition for a CDM indirect buffer command.
+ * @length: Number of bytes in buffer - 1
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @addr:  Device address of the indirect buffer
+ */
+struct cdm_indirect_cmd {
+	unsigned int length     : 16;
+	unsigned int reserved   : 8;
+	unsigned int cmd        : 8;
+	unsigned int addr;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_changebase_cmd - Definition for CDM base address change command.
+ * @base: Base address to be changed to
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_changebase_cmd {
+	unsigned int base   : 24;
+	unsigned int cmd    : 8;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_wait_event_cmd - Definition for a CDM wait-for-event command.
+ * @mask: Mask for the events
+ * @id: ID to read back for debug
+ * @iw_reserved: reserved bits
+ * @iw: iw AHB write bit
+ * @cmd: Command ID (CDMCmd)
+ * @offset: Offset to where data is written
+ * @offset_reserved: reserved bits
+ * @data: data returned in IRQ_USR_DATA
+ */
+struct cdm_wait_event_cmd {
+	unsigned int mask             : 8;
+	unsigned int id               : 8;
+	unsigned int iw_reserved      : 7;
+	unsigned int iw               : 1;
+	unsigned int cmd              : 8;
+	unsigned int offset           : 24;
+	unsigned int offset_reserved  : 8;
+	unsigned int data;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_genirq_cmd - Definition for a CDM Gen IRQ command.
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ * @userdata: userdata returned in IRQ_USR_DATA
+ */
+struct cdm_genirq_cmd {
+	unsigned int reserved   : 24;
+	unsigned int cmd        : 8;
+	unsigned int userdata;
+} __attribute__((__packed__));
+
+/**
+ * struct cdm_perf_ctrl_cmd - Definition for a CDM perf control command.
+ * @perf: perf command
+ * @reserved: reserved bits
+ * @cmd: Command ID (CDMCmd)
+ */
+struct cdm_perf_ctrl_cmd {
+	unsigned int perf     : 2;
+	unsigned int reserved : 22;
+	unsigned int cmd      : 8;
+} __attribute__((__packed__));
+
+uint32_t cdm_get_cmd_header_size(unsigned int command)
+{
+	return CDMCmdHeaderSizes[command];
+}
+
+uint32_t cdm_required_size_reg_continuous(uint32_t numVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT) + numVals;
+}
+
+uint32_t cdm_required_size_reg_random(uint32_t numRegVals)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM) +
+		(2 * numRegVals);
+}
+
+uint32_t cdm_required_size_dmi(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+}
+
+uint32_t cdm_required_size_genirq(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_GEN_IRQ);
+}
+
+uint32_t cdm_required_size_indirect(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+}
+
+uint32_t cdm_required_size_changebase(void)
+{
+	return cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+}
+
+uint32_t cdm_offsetof_dmi_addr(void)
+{
+	return offsetof(struct cdm_dmi_cmd, addr);
+}
+
+uint32_t cdm_offsetof_indirect_addr(void)
+{
+	return offsetof(struct cdm_indirect_cmd, addr);
+}
+
+uint32_t *cdm_write_regcontinuous(uint32_t *pCmdBuffer, uint32_t reg,
+	uint32_t numVals, uint32_t *pVals)
+{
+	uint32_t i;
+	struct cdm_regcontinuous_cmd *pHeader =
+		(struct cdm_regcontinuous_cmd *)pCmdBuffer;
+
+	pHeader->count = numVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_CONT;
+	pHeader->reserved0 = 0;
+	pHeader->reserved1 = 0;
+	pHeader->offset = reg;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+
+	for (i = 0; i < numVals; i++)
+		pCmdBuffer[i] = pVals[i];
+
+	pCmdBuffer += numVals;
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_regrandom(uint32_t *pCmdBuffer, uint32_t numRegVals,
+	uint32_t *pRegVals)
+{
+	uint32_t i;
+	uint32_t *dst, *src;
+	struct cdm_regrandom_cmd *pHeader =
+		(struct cdm_regrandom_cmd *)pCmdBuffer;
+
+	pHeader->count = numRegVals;
+	pHeader->cmd = CAM_CDM_CMD_REG_RANDOM;
+	pHeader->reserved = 0;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+	dst = pCmdBuffer;
+	src = pRegVals;
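+	/* Payload is <offset, value> dword pairs, copied as-is */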
+	for (i = 0; i < numRegVals; i++) {
+		*dst++ = *src++;
+		*dst++ = *src++;
+	}
+
+	return dst;
+}
+
+uint32_t *cdm_write_dmi(uint32_t *pCmdBuffer, uint8_t dmiCmd,
+	uint32_t DMIAddr, uint8_t DMISel, uint32_t dmiBufferAddr,
+	uint32_t length)
+{
+	struct cdm_dmi_cmd *pHeader = (struct cdm_dmi_cmd *)pCmdBuffer;
+
+	pHeader->cmd        = dmiCmd;
+	pHeader->addr = dmiBufferAddr;
+	pHeader->length = length - 1;
+	pHeader->DMIAddr = DMIAddr;
+	pHeader->DMISel = DMISel;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_DMI);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_indirect(uint32_t *pCmdBuffer, uint32_t indirectBufAddr,
+	uint32_t length)
+{
+	struct cdm_indirect_cmd *pHeader =
+		(struct cdm_indirect_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_BUFF_INDIRECT;
+	pHeader->addr = indirectBufAddr;
+	pHeader->length = length - 1;
+
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_BUFF_INDIRECT);
+
+	return pCmdBuffer;
+}
+
+uint32_t *cdm_write_changebase(uint32_t *pCmdBuffer, uint32_t base)
+{
+	struct cdm_changebase_cmd *pHeader =
+		(struct cdm_changebase_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_CHANGE_BASE;
+	pHeader->base = base;
+	pCmdBuffer += cdm_get_cmd_header_size(CAM_CDM_CMD_CHANGE_BASE);
+
+	return pCmdBuffer;
+}
+
+void cdm_write_genirq(uint32_t *pCmdBuffer, uint32_t userdata)
+{
+	struct cdm_genirq_cmd *pHeader = (struct cdm_genirq_cmd *)pCmdBuffer;
+
+	pHeader->cmd = CAM_CDM_CMD_GEN_IRQ;
+	pHeader->userdata = userdata;
+}
+
+struct cam_cdm_utils_ops CDM170_ops = {
+	cdm_get_cmd_header_size,
+	cdm_required_size_reg_continuous,
+	cdm_required_size_reg_random,
+	cdm_required_size_dmi,
+	cdm_required_size_genirq,
+	cdm_required_size_indirect,
+	cdm_required_size_changebase,
+	cdm_offsetof_dmi_addr,
+	cdm_offsetof_indirect_addr,
+	cdm_write_regcontinuous,
+	cdm_write_regrandom,
+	cdm_write_dmi,
+	cdm_write_indirect,
+	cdm_write_changebase,
+	cdm_write_genirq,
+};
+
+int cam_cdm_get_ioremap_from_base(uint32_t hw_base,
+	uint32_t base_array_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	void __iomem **device_base)
+{
+	int ret = -EINVAL, i;
+
+	for (i = 0; i < base_array_size; i++) {
+		if (base_table[i])
+			CAM_DBG(CAM_CDM, "In loop %d ioremap for %x addr=%x",
+				i, (base_table[i])->mem_cam_base, hw_base);
+		if ((base_table[i]) &&
+			((base_table[i])->mem_cam_base == hw_base)) {
+			*device_base = (base_table[i])->mem_base;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_cont_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	int ret = 0;
+	uint32_t *data;
+	struct cdm_regcontinuous_cmd *reg_cont;
+
+	if ((cmd_buf_size < cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) ||
+		(!base_addr)) {
+		CAM_ERR(CAM_CDM, "invalid base addr and data length  %d %pK",
+			cmd_buf_size, base_addr);
+		return -EINVAL;
+	}
+
+	reg_cont = (struct cdm_regcontinuous_cmd *)cmd_buf;
+	if ((!reg_cont->count) || (reg_cont->count > 0x10000) ||
+		(((reg_cont->count * sizeof(uint32_t)) +
+			cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT)) >
+			cmd_buf_size)) {
+		CAM_ERR(CAM_CDM, "buffer size %d is not sufficient for count%d",
+			cmd_buf_size, reg_cont->count);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT);
+	cam_io_memcpy(base_addr + reg_cont->offset, data,
+		reg_cont->count * sizeof(uint32_t));
+
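+	/* Bytes consumed: payload plus header (header size is in dwords, x4) */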
+	*used_bytes = (reg_cont->count * sizeof(uint32_t)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_CONT));
+
+	return ret;
+}
+
+static int cam_cdm_util_reg_random_write(void __iomem *base_addr,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size, uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_regrandom_cmd *reg_random;
+	uint32_t *data;
+
+	if (!base_addr) {
+		CAM_ERR(CAM_CDM, "invalid base address");
+		return -EINVAL;
+	}
+
+	reg_random = (struct cdm_regrandom_cmd *) cmd_buf;
+	if ((!reg_random->count) || (reg_random->count > 0x10000) ||
+		(((reg_random->count * (sizeof(uint32_t) * 2)) +
+		cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)) >
+			cmd_buf_size)) {
+		CAM_ERR(CAM_CDM, "invalid reg_count  %d cmd_buf_size %d",
+			reg_random->count, cmd_buf_size);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM);
+
+	for (i = 0; i < reg_random->count; i++) {
+		CAM_DBG(CAM_CDM, "reg random: offset %pK, value 0x%x",
+			((void __iomem *)(base_addr + data[0])),
+			data[1]);
+		cam_io_w(data[1], base_addr + data[0]);
+		data += 2;
+	}
+
+	*used_bytes = ((reg_random->count * (sizeof(uint32_t) * 2)) +
+		(4 * cdm_get_cmd_header_size(CAM_CDM_CMD_REG_RANDOM)));
+
+	return 0;
+}
+
+static int cam_cdm_util_swd_dmi_write(uint32_t cdm_cmd_type,
+	void __iomem *base_addr, uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	uint32_t *used_bytes)
+{
+	uint32_t i;
+	struct cdm_dmi_cmd *swd_dmi;
+	uint32_t *data;
+
+	swd_dmi = (struct cdm_dmi_cmd *)cmd_buf;
+
+	if (cmd_buf_size < (cdm_required_size_dmi() + swd_dmi->length + 1)) {
+		CAM_ERR(CAM_CDM, "invalid CDM_SWD_DMI length %d",
+			swd_dmi->length + 1);
+		return -EINVAL;
+	}
+	data = cmd_buf + cdm_required_size_dmi();
+
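+	/* 64-bit DMI entries are two dwords, written to the LO then HI
+	 * data registers; 32-bit entries are a single dword to LO.
+	 */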
+	if (cdm_cmd_type == CAM_CDM_CMD_SWD_DMI_64) {
+		for (i = 0; i < (swd_dmi->length + 1)/8; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			cam_io_w_mb(data[1], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_HI_OFFSET);
+			data += 2;
+		}
+	} else {
+		for (i = 0; i < (swd_dmi->length + 1)/4; i++) {
+			cam_io_w_mb(data[0], base_addr +
+				swd_dmi->DMIAddr + CAM_CDM_DMI_DATA_LO_OFFSET);
+			data += 1;
+		}
+	}
+	*used_bytes = (4 * cdm_required_size_dmi()) + swd_dmi->length + 1;
+
+	return 0;
+}
+
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag)
+{
+	int ret = 0;
+	uint32_t cdm_cmd_type = 0, total_cmd_buf_size = 0;
+	uint32_t used_bytes = 0;
+
+	total_cmd_buf_size = cmd_buf_size;
+
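+	/* Walk the buffer one command at a time; the command ID sits in
+	 * the top byte of each header dword.
+	 */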
+	while (cmd_buf_size > 0) {
+		CAM_DBG(CAM_CDM, "cmd data=%x", *cmd_buf);
+		cdm_cmd_type = (*cmd_buf >> CAM_CDM_COMMAND_OFFSET);
+		switch (cdm_cmd_type) {
+		case CAM_CDM_CMD_REG_CONT: {
+			ret = cam_cdm_util_reg_cont_write(*current_device_base,
+				cmd_buf, cmd_buf_size, &used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes/4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_REG_RANDOM: {
+			ret = cam_cdm_util_reg_random_write(
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_SWD_DMI_32:
+		case CAM_CDM_CMD_SWD_DMI_64: {
+			if (!*current_device_base) {
+				CAM_ERR(CAM_CDM,
+					"Got SWD DMI cmd=%d for invalid hw",
+					cdm_cmd_type);
+				ret = -EINVAL;
+				break;
+			}
+			ret = cam_cdm_util_swd_dmi_write(cdm_cmd_type,
+				*current_device_base, cmd_buf, cmd_buf_size,
+				&used_bytes);
+			if (ret)
+				break;
+
+			if (used_bytes > 0) {
+				cmd_buf_size -= used_bytes;
+				cmd_buf += used_bytes / 4;
+			}
+			}
+			break;
+		case CAM_CDM_CMD_CHANGE_BASE: {
+			struct cdm_changebase_cmd *change_base_cmd =
+				(struct cdm_changebase_cmd *)cmd_buf;
+
+			ret = cam_cdm_get_ioremap_from_base(
+				change_base_cmd->base, base_array_size,
+				base_table, current_device_base);
+			if (ret != 0) {
+				CAM_ERR(CAM_CDM,
+					"Get ioremap change base failed %x",
+					change_base_cmd->base);
+				break;
+			}
+			CAM_DBG(CAM_CDM, "Got ioremap for %x addr=%pK",
+				change_base_cmd->base,
+				current_device_base);
+			cmd_buf_size -= (4 *
+				cdm_required_size_changebase());
+			cmd_buf += cdm_required_size_changebase();
+			}
+			break;
+		default:
+			CAM_ERR(CAM_CDM, "unsupported cdm_cmd_type type 0%x",
+			cdm_cmd_type);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_dmi_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_DMI];
+	CAM_INFO(CAM_CDM, "DMI");
+	return ret;
+}
+
+static long cam_cdm_util_dump_buff_indirect(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_BUFF_INDIRECT];
+	CAM_INFO(CAM_CDM, "Buff Indirect");
+	return ret;
+}
+
+static long cam_cdm_util_dump_reg_cont_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+	struct cdm_regcontinuous_cmd *p_regcont_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
+	int i = 0;
+
+	p_regcont_cmd = (struct cdm_regcontinuous_cmd *)temp_ptr;
+	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_CONT];
+
+	CAM_INFO(CAM_CDM, "REG_CONT: COUNT: %u OFFSET: 0x%X",
+		p_regcont_cmd->count, p_regcont_cmd->offset);
+
+	for (i = 0; i < p_regcont_cmd->count; i++) {
+		CAM_INFO(CAM_CDM, "DATA_%d: 0x%X", i,
+			*temp_ptr);
+		temp_ptr++;
+		ret++;
+	}
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_reg_random_cmd(uint32_t *cmd_buf_addr)
+{
+	struct cdm_regrandom_cmd *p_regrand_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
+	long ret = 0;
+	int i = 0;
+
+	p_regrand_cmd = (struct cdm_regrandom_cmd *)temp_ptr;
+	temp_ptr += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_REG_RANDOM];
+
+	CAM_INFO(CAM_CDM, "REG_RAND: COUNT: %u",
+		p_regrand_cmd->count);
+
+	for (i = 0; i < p_regrand_cmd->count; i++) {
+		CAM_INFO(CAM_CDM, "OFFSET_%d: 0x%X DATA_%d: 0x%X",
+			i, *temp_ptr & CAM_CDM_REG_OFFSET_MASK, i,
+			*(temp_ptr + 1));
+		temp_ptr += 2;
+		ret += 2;
+	}
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_gen_irq_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_GEN_IRQ];
+
+	CAM_INFO(CAM_CDM, "GEN_IRQ");
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_wait_event_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_WAIT_EVENT];
+
+	CAM_INFO(CAM_CDM, "WAIT_EVENT");
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_change_base_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+	struct cdm_changebase_cmd *p_cbase_cmd;
+	uint32_t *temp_ptr = cmd_buf_addr;
+
+	p_cbase_cmd = (struct cdm_changebase_cmd *)temp_ptr;
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_CHANGE_BASE];
+
+	CAM_INFO(CAM_CDM, "CHANGE_BASE: 0x%X",
+		p_cbase_cmd->base);
+
+	return ret;
+}
+
+static long cam_cdm_util_dump_perf_ctrl_cmd(uint32_t *cmd_buf_addr)
+{
+	long ret = 0;
+
+	ret += CDMCmdHeaderSizes[CAM_CDM_CMD_PERF_CTRL];
+
+	CAM_INFO(CAM_CDM, "PERF_CTRL");
+
+	return ret;
+}
+
+void cam_cdm_util_dump_cmd_buf(
+	uint32_t *cmd_buf_start, uint32_t *cmd_buf_end)
+{
+	uint32_t *buf_now = cmd_buf_start;
+	uint32_t cmd = 0;
+
+	if (!cmd_buf_start || !cmd_buf_end) {
+		CAM_INFO(CAM_CDM, "Invalid args");
+		return;
+	}
+
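+	/* Each dump helper returns the number of dwords it consumed so
+	 * the walk can advance to the next command header.
+	 */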
+	do {
+		cmd = *buf_now;
+		cmd = cmd >> CAM_CDM_COMMAND_OFFSET;
+
+		switch (cmd) {
+		case CAM_CDM_CMD_DMI:
+		case CAM_CDM_CMD_DMI_32:
+		case CAM_CDM_CMD_DMI_64:
+			buf_now += cam_cdm_util_dump_dmi_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_REG_CONT:
+			buf_now += cam_cdm_util_dump_reg_cont_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_REG_RANDOM:
+			buf_now += cam_cdm_util_dump_reg_random_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_BUFF_INDIRECT:
+			buf_now += cam_cdm_util_dump_buff_indirect(buf_now);
+			break;
+		case CAM_CDM_CMD_GEN_IRQ:
+			buf_now += cam_cdm_util_dump_gen_irq_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_WAIT_EVENT:
+			buf_now += cam_cdm_util_dump_wait_event_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_CHANGE_BASE:
+			buf_now += cam_cdm_util_dump_change_base_cmd(buf_now);
+			break;
+		case CAM_CDM_CMD_PERF_CTRL:
+			buf_now += cam_cdm_util_dump_perf_ctrl_cmd(buf_now);
+			break;
+		default:
+			CAM_INFO(CAM_CDM, "Invalid CMD: 0x%x buf 0x%x",
+				cmd, *buf_now);
+			buf_now++;
+			break;
+		}
+	} while (buf_now <= cmd_buf_end);
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
new file mode 100644
index 0000000..04f2af3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_util.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_UTIL_H_
+#define _CAM_CDM_UTIL_H_
+
+enum cam_cdm_command {
+	CAM_CDM_CMD_UNUSED = 0x0,
+	CAM_CDM_CMD_DMI = 0x1,
+	CAM_CDM_CMD_NOT_DEFINED = 0x2,
+	CAM_CDM_CMD_REG_CONT = 0x3,
+	CAM_CDM_CMD_REG_RANDOM = 0x4,
+	CAM_CDM_CMD_BUFF_INDIRECT = 0x5,
+	CAM_CDM_CMD_GEN_IRQ = 0x6,
+	CAM_CDM_CMD_WAIT_EVENT = 0x7,
+	CAM_CDM_CMD_CHANGE_BASE = 0x8,
+	CAM_CDM_CMD_PERF_CTRL = 0x9,
+	CAM_CDM_CMD_DMI_32 = 0xa,
+	CAM_CDM_CMD_DMI_64 = 0xb,
+	CAM_CDM_CMD_PRIVATE_BASE = 0xc,
+	CAM_CDM_CMD_SWD_DMI_32 = (CAM_CDM_CMD_PRIVATE_BASE + 0x64),
+	CAM_CDM_CMD_SWD_DMI_64 = (CAM_CDM_CMD_PRIVATE_BASE + 0x65),
+	CAM_CDM_CMD_PRIVATE_BASE_MAX = 0x7F
+};
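+
+/*
+ * The command ID occupies bits [31:24] of the first header dword
+ * (CAM_CDM_COMMAND_OFFSET); the remaining bits are command-specific.
+ */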
+
+/**
+ * struct cam_cdm_utils_ops - Camera CDM util ops
+ *
+ * @cdm_get_cmd_header_size: Returns the size of the given command header
+ *                           in dwords.
+ *      @command Command ID
+ *      @return Size of the command in dwords
+ *
+ * @cdm_required_size_reg_continuous: Calculates the size of a reg-continuous
+ *                                    command in dwords.
+ *      @numVals Number of continuous values
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_reg_random: Calculates the size of a reg-random command
+ *                                in dwords.
+ *      @numRegVals  Number of register/value pairs
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_dmi: Calculates the size of a DMI command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_genirq: Calculates size of a Genirq command in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_indirect: Calculates the size of an indirect command
+ *                              in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_required_size_changebase: Calculates the size of a change-base command
+ *                                in dwords.
+ *      @return Size in dwords
+ *
+ * @cdm_offsetof_dmi_addr: Returns the offset of address field in the DMI
+ *                         command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_offsetof_indirect_addr: Returns the offset of address field in the
+ *                              indirect command header.
+ *      @return Offset of addr field
+ *
+ * @cdm_write_regcontinuous: Writes a command into the command buffer.
+ *      @pCmdBuffer:  Pointer to command buffer
+ *      @reg: Beginning of the register address range where
+ *            values will be written.
+ *      @numVals: Number of values (registers) that will be written
+ *      @pVals : An array of values that will be written
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_regrandom: Writes a command into the command buffer in
+ *                       register/value pairs.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @numRegVals: Number of register/value pairs that will be written
+ *      @pRegVals: An array of register/value pairs that will be written
+ *                 The even indices are registers and the odd indices
+ *                 are values, e.g., {reg1, val1, reg2, val2, ...}.
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_dmi: Writes a DMI command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @dmiCmd: DMI command
+ *      @DMIAddr: Address of the DMI
+ *      @DMISel: Selected bank that the DMI will write to
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_indirect: Writes an indirect command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @indirectBufferAddr: Device address of the indirect cmd buffer.
+ *      @length: Size of data in bytes
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_changebase: Writes a change-base (CDM address base) command
+ *                        into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @base: New base (device) address
+ *      @return Pointer in command buffer pointing past the written commands
+ *
+ * @cdm_write_genirq:  Writes a gen irq command into the command buffer.
+ *      @pCmdBuffer: Pointer to command buffer
+ *      @userdata: userdata or cookie returned by hardware during IRQ.
+ */
+struct cam_cdm_utils_ops {
+	uint32_t (*cdm_get_cmd_header_size)(unsigned int command);
+	uint32_t (*cdm_required_size_reg_continuous)(uint32_t numVals);
+	uint32_t (*cdm_required_size_reg_random)(uint32_t numRegVals);
+	uint32_t (*cdm_required_size_dmi)(void);
+	uint32_t (*cdm_required_size_genirq)(void);
+	uint32_t (*cdm_required_size_indirect)(void);
+	uint32_t (*cdm_required_size_changebase)(void);
+	uint32_t (*cdm_offsetof_dmi_addr)(void);
+	uint32_t (*cdm_offsetof_indirect_addr)(void);
+	uint32_t *(*cdm_write_regcontinuous)(
+		uint32_t *pCmdBuffer,
+		uint32_t reg,
+		uint32_t numVals,
+		uint32_t *pVals);
+	uint32_t *(*cdm_write_regrandom)(
+		uint32_t *pCmdBuffer,
+		uint32_t numRegVals,
+		uint32_t *pRegVals);
+	uint32_t *(*cdm_write_dmi)(
+		uint32_t *pCmdBuffer,
+		uint8_t  dmiCmd,
+		uint32_t DMIAddr,
+		uint8_t  DMISel,
+		uint32_t dmiBufferAddr,
+		uint32_t length);
+	uint32_t *(*cdm_write_indirect)(
+		uint32_t *pCmdBuffer,
+		uint32_t indirectBufferAddr,
+		uint32_t length);
+	uint32_t *(*cdm_write_changebase)(
+		uint32_t *pCmdBuffer,
+		uint32_t base);
+	void (*cdm_write_genirq)(
+		uint32_t *pCmdBuffer,
+		uint32_t userdata);
+};
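+
+/*
+ * Illustrative usage sketch (not part of this change; all names below are
+ * placeholders): composing a small command buffer through the ops table.
+ * Size helpers return dwords; each write helper returns a pointer just
+ * past what it wrote.
+ *
+ *	uint32_t buf[64], *cur = buf;
+ *	uint32_t vals[2] = { 0x10, 0x20 };
+ *
+ *	cur = ops->cdm_write_changebase(cur, hw_base);
+ *	cur = ops->cdm_write_regcontinuous(cur, reg_offset, 2, vals);
+ *	used_dwords = (uint32_t)(cur - buf);
+ */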
+
+/**
+ * cam_cdm_util_dump_cmd_buf()
+ *
+ * @brief:            Util function to dump CDM command buffers
+ *
+ * @cmd_buffer_start: Pointer to start of cmd buffer
+ * @cmd_buffer_end:   Pointer to end of cmd buffer
+ *
+ */
+void cam_cdm_util_dump_cmd_buf(
+	uint32_t *cmd_buffer_start, uint32_t *cmd_buffer_end);
+
+#endif /* _CAM_CDM_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
new file mode 100644
index 0000000..8d49d9ae
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CDM_VIRTUAL_H_
+#define _CAM_CDM_VIRTUAL_H_
+
+#include "cam_cdm_intf_api.h"
+
+int cam_virtual_cdm_probe(struct platform_device *pdev);
+int cam_virtual_cdm_remove(struct platform_device *pdev);
+int cam_cdm_util_cmd_buf_write(void __iomem **current_device_base,
+	uint32_t *cmd_buf, uint32_t cmd_buf_size,
+	struct cam_soc_reg_map *base_table[CAM_SOC_MAX_BLOCK],
+	uint32_t base_array_size, uint8_t bl_tag);
+
+#endif /* _CAM_CDM_VIRTUAL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
new file mode 100644
index 0000000..be0f427
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_cdm_virtual_core.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include "cam_soc_util.h"
+#include "cam_smmu_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_cdm.h"
+#include "cam_cdm_util.h"
+#include "cam_cdm_virtual.h"
+#include "cam_cdm_core_common.h"
+#include "cam_cdm_soc.h"
+#include "cam_io_util.h"
+
+#define CAM_CDM_VIRTUAL_NAME "qcom,cam_virtual_cdm"
+
+static void cam_virtual_cdm_work(struct work_struct *work)
+{
+	struct cam_cdm_work_payload *payload;
+	struct cam_hw_info *cdm_hw;
+	struct cam_cdm *core;
+
+	payload = container_of(work, struct cam_cdm_work_payload, work);
+	if (payload) {
+		cdm_hw = payload->hw;
+		core = (struct cam_cdm *)cdm_hw->core_info;
+		if (payload->irq_status & 0x2) {
+			struct cam_cdm_bl_cb_request_entry *node;
+
+			CAM_DBG(CAM_CDM, "CDM HW Gen/inline IRQ with data=%x",
+				payload->irq_data);
+			mutex_lock(&cdm_hw->hw_mutex);
+			node = cam_cdm_find_request_by_bl_tag(
+				payload->irq_data,
+				&core->bl_request_list);
+			if (node) {
+				if (node->request_type ==
+					CAM_HW_CDM_BL_CB_CLIENT) {
+					cam_cdm_notify_clients(cdm_hw,
+						CAM_CDM_CB_STATUS_BL_SUCCESS,
+						(void *)node);
+				} else if (node->request_type ==
+					CAM_HW_CDM_BL_CB_INTERNAL) {
+					CAM_ERR(CAM_CDM, "Invalid node=%pK %d",
+						node, node->request_type);
+				}
+				list_del_init(&node->entry);
+				kfree(node);
+			} else {
+				CAM_ERR(CAM_CDM, "Invalid node for inline irq");
+			}
+			mutex_unlock(&cdm_hw->hw_mutex);
+		}
+		if (payload->irq_status & 0x1) {
+			CAM_DBG(CAM_CDM, "CDM HW reset done IRQ");
+			complete(&core->reset_complete);
+		}
+		kfree(payload);
+	}
+}
+
+int cam_virtual_cdm_submit_bl(struct cam_hw_info *cdm_hw,
+	struct cam_cdm_hw_intf_cmd_submit_bl *req,
+	struct cam_cdm_client *client)
+{
+	int i, rc = -EINVAL;
+	struct cam_cdm_bl_request *cdm_cmd = req->data;
+	struct cam_cdm *core = (struct cam_cdm *)cdm_hw->core_info;
+
+	mutex_lock(&client->lock);
+	for (i = 0; i < req->data->cmd_arrary_count ; i++) {
+		uintptr_t vaddr_ptr = 0;
+		size_t len = 0;
+
+		if ((!cdm_cmd->cmd[i].len) ||
+			(cdm_cmd->cmd[i].len > 0x100000)) {
+			CAM_ERR(CAM_CDM,
+				"len(%d) is invalid count=%d total cnt=%d",
+				cdm_cmd->cmd[i].len, i,
+				req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+		if (req->data->type == CAM_CDM_BL_CMD_TYPE_MEM_HANDLE) {
+			rc = cam_mem_get_cpu_buf(
+				cdm_cmd->cmd[i].bl_addr.mem_handle, &vaddr_ptr,
+				&len);
+		} else if (req->data->type ==
+			CAM_CDM_BL_CMD_TYPE_KERNEL_IOVA) {
+			rc = 0;
+			vaddr_ptr = cdm_cmd->cmd[i].bl_addr.kernel_iova;
+			len = cdm_cmd->cmd[i].offset + cdm_cmd->cmd[i].len;
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Only mem hdl/Kernel va type is supported %d",
+				req->data->type);
+			rc = -EINVAL;
+			break;
+		}
+
+		if ((!rc) && (vaddr_ptr) && (len) &&
+			(len >= cdm_cmd->cmd[i].offset)) {
+			CAM_DBG(CAM_CDM,
+				"hdl=%x vaddr=%pK offset=%d cmdlen=%d:%zu",
+				cdm_cmd->cmd[i].bl_addr.mem_handle,
+				(void *)vaddr_ptr, cdm_cmd->cmd[i].offset,
+				cdm_cmd->cmd[i].len, len);
+			rc = cam_cdm_util_cmd_buf_write(
+				&client->changebase_addr,
+				((uint32_t *)vaddr_ptr +
+					((cdm_cmd->cmd[i].offset)/4)),
+				cdm_cmd->cmd[i].len, client->data.base_array,
+				client->data.base_array_cnt, core->bl_tag);
+			if (rc) {
+				CAM_ERR(CAM_CDM,
+					"write failed for cnt=%d:%d len %u",
+					i, req->data->cmd_arrary_count,
+					cdm_cmd->cmd[i].len);
+				break;
+			}
+		} else {
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for hdl=%x len=%zu:%d",
+				cdm_cmd->cmd[i].bl_addr.mem_handle, len,
+				cdm_cmd->cmd[i].offset);
+			CAM_ERR(CAM_CDM,
+				"Sanity check failed for cmd_count=%d cnt=%d",
+				i, req->data->cmd_arrary_count);
+			rc = -EINVAL;
+			break;
+		}
+		if (!rc) {
+			struct cam_cdm_work_payload *payload;
+
+			CAM_DBG(CAM_CDM,
+				"write BL success for cnt=%d with tag=%d",
+				i, core->bl_tag);
+			if ((req->data->flag == true) &&
+				(i == (req->data->cmd_arrary_count - 1))) {
+				struct cam_cdm_bl_cb_request_entry *node;
+
+				node = kzalloc(sizeof(
+					struct cam_cdm_bl_cb_request_entry),
+					GFP_KERNEL);
+				if (!node) {
+					rc = -ENOMEM;
+					break;
+				}
+				node->request_type = CAM_HW_CDM_BL_CB_CLIENT;
+				node->client_hdl = req->handle;
+				node->cookie = req->data->cookie;
+				node->bl_tag = core->bl_tag;
+				node->userdata = req->data->userdata;
+				mutex_lock(&cdm_hw->hw_mutex);
+				list_add_tail(&node->entry,
+					&core->bl_request_list);
+				mutex_unlock(&cdm_hw->hw_mutex);
+
+				payload = kzalloc(sizeof(
+					struct cam_cdm_work_payload),
+					GFP_ATOMIC);
+				if (payload) {
+					payload->irq_status = 0x2;
+					payload->irq_data = core->bl_tag;
+					payload->hw = cdm_hw;
+					INIT_WORK((struct work_struct *)
+						&payload->work,
+						cam_virtual_cdm_work);
+					queue_work(core->work_queue,
+						&payload->work);
+				}
+			}
+			core->bl_tag++;
+			CAM_DBG(CAM_CDM,
+				"Commit BL is a no-op for virtual CDM");
+			if (!rc && (core->bl_tag == 63))
+				core->bl_tag = 0;
+		}
+	}
+	mutex_unlock(&client->lock);
+	return rc;
+}
+
+int cam_virtual_cdm_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	struct cam_cdm_private_dt_data *soc_private = NULL;
+	int rc;
+	struct cam_cpas_register_params cpas_parms;
+
+	cdm_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cdm_hw_intf)
+		return -ENOMEM;
+
+	cdm_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cdm_hw) {
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+
+	cdm_hw->core_info = kzalloc(sizeof(struct cam_cdm), GFP_KERNEL);
+	if (!cdm_hw->core_info) {
+		kfree(cdm_hw);
+		kfree(cdm_hw_intf);
+		return -ENOMEM;
+	}
+	cdm_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cdm_hw->soc_info.pdev = pdev;
+	cdm_hw_intf->hw_type = CAM_VIRTUAL_CDM;
+	cdm_hw->soc_info.soc_private = kzalloc(
+			sizeof(struct cam_cdm_private_dt_data), GFP_KERNEL);
+	if (!cdm_hw->soc_info.soc_private) {
+		rc = -ENOMEM;
+		goto soc_load_failed;
+	}
+
+	rc = cam_cdm_soc_load_dt_private(pdev, cdm_hw->soc_info.soc_private);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Failed to load CDM dt private data");
+		kfree(cdm_hw->soc_info.soc_private);
+		cdm_hw->soc_info.soc_private = NULL;
+		goto soc_load_failed;
+	}
+
+	cdm_core = (struct cam_cdm *)cdm_hw->core_info;
+	soc_private = (struct cam_cdm_private_dt_data *)
+					cdm_hw->soc_info.soc_private;
+	if (soc_private->dt_cdm_shared == true)
+		cdm_core->flags = CAM_CDM_FLAG_SHARED_CDM;
+	else
+		cdm_core->flags = CAM_CDM_FLAG_PRIVATE_CDM;
+
+	cdm_core->bl_tag = 0;
+	INIT_LIST_HEAD(&cdm_core->bl_request_list);
+	init_completion(&cdm_core->reset_complete);
+	cdm_hw_intf->hw_priv = cdm_hw;
+	cdm_hw_intf->hw_ops.get_hw_caps = cam_cdm_get_caps;
+	cdm_hw_intf->hw_ops.init = NULL;
+	cdm_hw_intf->hw_ops.deinit = NULL;
+	cdm_hw_intf->hw_ops.start = cam_cdm_stream_start;
+	cdm_hw_intf->hw_ops.stop = cam_cdm_stream_stop;
+	cdm_hw_intf->hw_ops.read = NULL;
+	cdm_hw_intf->hw_ops.write = NULL;
+	cdm_hw_intf->hw_ops.process_cmd = cam_cdm_process_cmd;
+
+	CAM_DBG(CAM_CDM, "type %d index %d", cdm_hw_intf->hw_type,
+		cdm_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, cdm_hw_intf);
+
+	cdm_hw->open_count = 0;
+	cdm_core->iommu_hdl.non_secure = -1;
+	cdm_core->iommu_hdl.secure = -1;
+	mutex_init(&cdm_hw->hw_mutex);
+	spin_lock_init(&cdm_hw->hw_lock);
+	init_completion(&cdm_hw->hw_complete);
+	mutex_lock(&cdm_hw->hw_mutex);
+	cdm_core->id = CAM_CDM_VIRTUAL;
+	memcpy(cdm_core->name, CAM_CDM_VIRTUAL_NAME,
+		sizeof(CAM_CDM_VIRTUAL_NAME));
+	cdm_core->work_queue = alloc_workqueue(cdm_core->name,
+		WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS,
+		CAM_CDM_INFLIGHT_WORKS);
+	cdm_core->ops = NULL;
+
+	cpas_parms.cam_cpas_client_cb = cam_cdm_cpas_cb;
+	cpas_parms.cell_index = cdm_hw->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = cdm_hw_intf;
+	strlcpy(cpas_parms.identifier, "cam-cdm-intf",
+		CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Virtual CDM CPAS registration failed");
+		goto cpas_registration_failed;
+	}
+	CAM_DBG(CAM_CDM, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	cdm_core->cpas_handle = cpas_parms.client_handle;
+
+	CAM_DBG(CAM_CDM, "CDM%d probe successful", cdm_hw_intf->hw_idx);
+
+	rc = cam_cdm_intf_register_hw_cdm(cdm_hw_intf,
+			soc_private, CAM_VIRTUAL_CDM, &cdm_core->index);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "Virtual CDM Interface registration failed");
+		goto intf_registration_failed;
+	}
+	CAM_DBG(CAM_CDM, "CDM%d registered to intf successful",
+		cdm_hw_intf->hw_idx);
+	mutex_unlock(&cdm_hw->hw_mutex);
+
+	return 0;
+intf_registration_failed:
+	cam_cpas_unregister_client(cdm_core->cpas_handle);
+cpas_registration_failed:
+	kfree(cdm_hw->soc_info.soc_private);
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_unlock(&cdm_hw->hw_mutex);
+	mutex_destroy(&cdm_hw->hw_mutex);
+soc_load_failed:
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	return rc;
+}
+
+int cam_virtual_cdm_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *cdm_hw = NULL;
+	struct cam_hw_intf *cdm_hw_intf = NULL;
+	struct cam_cdm *cdm_core = NULL;
+	int rc = -EBUSY;
+
+	cdm_hw_intf = platform_get_drvdata(pdev);
+	if (!cdm_hw_intf) {
+		CAM_ERR(CAM_CDM, "Failed to get dev private data");
+		return rc;
+	}
+
+	cdm_hw = cdm_hw_intf->hw_priv;
+	if (!cdm_hw) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get virtual private data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	cdm_core = cdm_hw->core_info;
+	if (!cdm_core) {
+		CAM_ERR(CAM_CDM,
+			"Failed to get virtual core data for type=%d idx=%d",
+			cdm_hw_intf->hw_type, cdm_hw_intf->hw_idx);
+		return rc;
+	}
+
+	rc = cam_cpas_unregister_client(cdm_core->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_CDM, "CPAS unregister failed");
+		return rc;
+	}
+
+	rc = cam_cdm_intf_deregister_hw_cdm(cdm_hw_intf,
+			cdm_hw->soc_info.soc_private, CAM_VIRTUAL_CDM,
+			cdm_core->index);
+	if (rc) {
+		CAM_ERR(CAM_CDM,
+			"Virtual CDM Interface de-registration failed");
+		return rc;
+	}
+
+	flush_workqueue(cdm_core->work_queue);
+	destroy_workqueue(cdm_core->work_queue);
+	mutex_destroy(&cdm_hw->hw_mutex);
+	kfree(cdm_hw->soc_info.soc_private);
+	kfree(cdm_hw->core_info);
+	kfree(cdm_hw);
+	kfree(cdm_hw_intf);
+	rc = 0;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
new file mode 100644
index 0000000..b2409ee
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cdm/cam_hw_cdm170_reg.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_CDM170_REG_H_
+#define _CAM_HW_CDM170_REG_H_
+
+#define CAM_CDM_REG_OFFSET_FIRST 0x0
+#define CAM_CDM_REG_OFFSET_LAST 0x200
+#define CAM_CDM_REGS_COUNT 0x30
+#define CAM_CDM_HWFIFO_SIZE 0x40
+
+#define CAM_CDM_OFFSET_HW_VERSION 0x0
+#define CAM_CDM_OFFSET_TITAN_VERSION 0x4
+#define CAM_CDM_OFFSET_RST_CMD 0x10
+#define CAM_CDM_OFFSET_CGC_CFG 0x14
+#define CAM_CDM_OFFSET_CORE_CFG 0x18
+#define CAM_CDM_OFFSET_CORE_EN 0x1c
+#define CAM_CDM_OFFSET_FE_CFG 0x20
+#define CAM_CDM_OFFSET_IRQ_MASK 0x30
+#define CAM_CDM_OFFSET_IRQ_CLEAR 0x34
+#define CAM_CDM_OFFSET_IRQ_CLEAR_CMD 0x38
+#define CAM_CDM_OFFSET_IRQ_SET 0x3c
+#define CAM_CDM_OFFSET_IRQ_SET_CMD 0x40
+
+#define CAM_CDM_OFFSET_IRQ_STATUS 0x44
+#define CAM_CDM_IRQ_STATUS_INFO_RST_DONE_MASK 0x1
+#define CAM_CDM_IRQ_STATUS_INFO_INLINE_IRQ_MASK 0x2
+#define CAM_CDM_IRQ_STATUS_INFO_BL_DONE_MASK 0x4
+#define CAM_CDM_IRQ_STATUS_ERROR_INV_CMD_MASK 0x10000
+#define CAM_CDM_IRQ_STATUS_ERROR_OVER_FLOW_MASK 0x20000
+#define CAM_CDM_IRQ_STATUS_ERROR_AHB_BUS_MASK 0x40000
+
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_REG 0x50
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_REG 0x54
+#define CAM_CDM_OFFSET_BL_FIFO_STORE_REG 0x58
+#define CAM_CDM_OFFSET_BL_FIFO_CFG 0x5c
+#define CAM_CDM_OFFSET_BL_FIFO_RB 0x60
+#define CAM_CDM_OFFSET_BL_FIFO_BASE_RB 0x64
+#define CAM_CDM_OFFSET_BL_FIFO_LEN_RB 0x68
+#define CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB 0x6c
+#define CAM_CDM_OFFSET_IRQ_USR_DATA 0x80
+#define CAM_CDM_OFFSET_WAIT_STATUS 0x84
+#define CAM_CDM_OFFSET_SCRATCH_0_REG 0x90
+#define CAM_CDM_OFFSET_SCRATCH_1_REG 0x94
+#define CAM_CDM_OFFSET_SCRATCH_2_REG 0x98
+#define CAM_CDM_OFFSET_SCRATCH_3_REG 0x9c
+#define CAM_CDM_OFFSET_SCRATCH_4_REG 0xa0
+#define CAM_CDM_OFFSET_SCRATCH_5_REG 0xa4
+#define CAM_CDM_OFFSET_SCRATCH_6_REG 0xa8
+#define CAM_CDM_OFFSET_SCRATCH_7_REG 0xac
+#define CAM_CDM_OFFSET_LAST_AHB_ADDR 0xd0
+#define CAM_CDM_OFFSET_LAST_AHB_DATA 0xd4
+#define CAM_CDM_OFFSET_CORE_DBUG 0xd8
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR 0xe0
+#define CAM_CDM_OFFSET_LAST_AHB_ERR_DATA 0xe4
+#define CAM_CDM_OFFSET_CURRENT_BL_BASE 0xe8
+#define CAM_CDM_OFFSET_CURRENT_BL_LEN 0xec
+#define CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE 0xf0
+#define CAM_CDM_OFFSET_DEBUG_STATUS 0xf4
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_0 0x100
+#define CAM_CDM_OFFSET_BUS_MISR_CFG_1 0x104
+#define CAM_CDM_OFFSET_BUS_MISR_RD_VAL 0x108
+#define CAM_CDM_OFFSET_PERF_MON_CTRL 0x110
+#define CAM_CDM_OFFSET_PERF_MON_0 0x114
+#define CAM_CDM_OFFSET_PERF_MON_1 0x118
+#define CAM_CDM_OFFSET_PERF_MON_2 0x11c
+#define CAM_CDM_OFFSET_SPARE 0x200
+
+/*
+ * Always make sure below register offsets are aligned with
+ * enum cam_cdm_regs offsets
+ */
+struct cam_cdm_reg_offset cam170_cpas_cdm_register_offsets[] = {
+	{ CAM_CDM_OFFSET_HW_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_TITAN_VERSION, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_RST_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_CGC_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_CORE_EN, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_FE_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_MASK, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_CLEAR_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_SET_CMD, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_IRQ_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_IRQ_USR_DATA, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_STORE_REG, CAM_REG_ATTR_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_CFG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_RB, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BL_FIFO_BASE_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_LEN_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BL_FIFO_PENDING_REQ_RB, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_WAIT_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SCRATCH_0_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_1_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_2_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_3_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_4_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_5_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_6_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_SCRATCH_7_REG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CORE_DBUG, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_ADDR, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_LAST_AHB_ERR_DATA, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_BL_LEN, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_CURRENT_USED_AHB_BASE, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_DEBUG_STATUS, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_0, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_CFG_1, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_BUS_MISR_RD_VAL, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_CTRL, CAM_REG_ATTR_READ_WRITE },
+	{ CAM_CDM_OFFSET_PERF_MON_0, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_1, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_PERF_MON_2, CAM_REG_ATTR_READ },
+	{ CAM_CDM_OFFSET_SPARE, CAM_REG_ATTR_READ_WRITE }
+};
+
+struct cam_cdm_reg_offset_table cam170_cpas_cdm_offset_table = {
+	.first_offset = 0x0,
+	.last_offset = 0x200,
+	.reg_count = 0x30,
+	.offsets = cam170_cpas_cdm_register_offsets,
+	.offset_max_size = (sizeof(cam170_cpas_cdm_register_offsets)/
+		sizeof(struct cam_cdm_reg_offset)),
+};
+
+#endif /* _CAM_HW_CDM170_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/Makefile b/drivers/media/platform/msm/camera/cam_core/Makefile
new file mode 100644
index 0000000..3006052
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_context.o cam_context_utils.o cam_node.o cam_subdev.o
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.c b/drivers/media/platform/msm/camera/cam_core/cam_context.c
new file mode 100644
index 0000000..2cb62de
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/refcount.h>
+
+#include "cam_context.h"
+#include "cam_debug_util.h"
+#include "cam_node.h"
+
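+/*
+ * All handlers below dispatch through the per-state function table
+ * (ctx->state_machine); a missing op in the current state is either
+ * an error (-EPROTO) or optional, depending on the call.
+ */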
+static int cam_context_handle_hw_event(void *context, uint32_t evt_id,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_context *ctx = (struct cam_context *)context;
+
+	if (!ctx || !ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].irq_ops)
+		rc = ctx->state_machine[ctx->state].irq_ops(ctx, evt_id,
+			evt_data);
+	else
+		CAM_DBG(CAM_CORE,
+			"No function to handle event %d in dev %d, state %d",
+			evt_id, ctx->dev_hdl, ctx->state);
+	return rc;
+}
+
+int cam_context_shutdown(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_release_dev_cmd cmd;
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state > CAM_CTX_AVAILABLE && ctx->state < CAM_CTX_STATE_MAX) {
+		cmd.session_handle = ctx->session_hdl;
+		cmd.dev_handle = ctx->dev_hdl;
+		rc = cam_context_handle_release_dev(ctx, &cmd);
+		if (rc)
+			CAM_ERR(CAM_CORE,
+				"context release failed for dev_name %s",
+				ctx->dev_name);
+		else
+			cam_context_putref(ctx);
+	} else {
+		CAM_WARN(CAM_CORE,
+			"dev %s context id %u state %d invalid to release hdl",
+			ctx->dev_name, ctx->ctx_id, ctx->state);
+		rc = -EINVAL;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	rc = cam_destroy_device_hdl(ctx->dev_hdl);
+	if (rc)
+		CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
+			ctx->dev_name);
+	else
+		ctx->dev_hdl = -1;
+
+	return rc;
+}
+
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *info)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!info) {
+		CAM_ERR(CAM_CORE, "Invalid get device info payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.get_dev_info) {
+		rc = ctx->state_machine[ctx->state].crm_ops.get_dev_info(
+			ctx, info);
+	} else {
+		CAM_ERR(CAM_CORE, "No get device info in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_link(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!link) {
+		CAM_ERR(CAM_CORE, "Invalid link payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.link) {
+		rc = ctx->state_machine[ctx->state].crm_ops.link(ctx, link);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm link in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!unlink) {
+		CAM_ERR(CAM_CORE, "Invalid unlink payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.unlink) {
+		rc = ctx->state_machine[ctx->state].crm_ops.unlink(
+			ctx, unlink);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm unlink in dev %d, name %s, state %d",
+			ctx->dev_hdl, ctx->dev_name, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!apply) {
+		CAM_ERR(CAM_CORE, "Invalid apply request payload");
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].crm_ops.apply_req) {
+		rc = ctx->state_machine[ctx->state].crm_ops.apply_req(ctx,
+			apply);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm apply req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+
+	return rc;
+}
+
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.flush_req) {
+		rc = ctx->state_machine[ctx->state].crm_ops.flush_req(ctx,
+			flush);
+	} else {
+		CAM_ERR(CAM_CORE, "No crm flush req in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *process_evt)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].crm_ops.process_evt) {
+		rc = ctx->state_machine[ctx->state].crm_ops.process_evt(ctx,
+			process_evt);
+	} else {
+		/* handling of this message is optional */
+		CAM_DBG(CAM_CORE, "No crm process evt in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+	uint32_t buf_info)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (ctx->state_machine[ctx->state].pagefault_ops) {
+		rc = ctx->state_machine[ctx->state].pagefault_ops(ctx, iova,
+			buf_info);
+	} else {
+		CAM_WARN(CAM_CORE, "No dump ctx in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+
+	return rc;
+}
+
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+	int i;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid acquire device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_dev(
+			ctx, cmd);
+	} else {
+		CAM_ERR(CAM_CORE, "No acquire device in dev %d, state %d",
+			cmd->dev_handle, ctx->state);
+		rc = -EPROTO;
+	}
+
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	INIT_LIST_HEAD(&ctx->wait_req_list);
+	INIT_LIST_HEAD(&ctx->pending_req_list);
+	INIT_LIST_HEAD(&ctx->free_req_list);
+
+	for (i = 0; i < ctx->req_size; i++) {
+		INIT_LIST_HEAD(&ctx->req_list[i].list);
+		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+	}
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_acquire_hw(struct cam_context *ctx,
+	void *args)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!args) {
+		CAM_ERR(CAM_CORE, "Invalid acquire device hw command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.acquire_hw) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.acquire_hw(
+			ctx, args);
+	} else {
+		CAM_ERR(CAM_CORE, "No acquire hw for dev %s, state %d",
+			ctx->dev_name, ctx->state);
+		rc = -EPROTO;
+	}
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_release_dev(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid release device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.release_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.release_dev(
+			ctx, cmd);
+	} else {
+		CAM_ERR(CAM_CORE, "No release device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_release_hw(struct cam_context *ctx,
+	void *args)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!args) {
+		CAM_ERR(CAM_CORE, "Invalid release HW command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.release_hw) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.release_hw(
+			ctx, args);
+	} else {
+		CAM_ERR(CAM_CORE, "No release hw for dev %s, state %d",
+			ctx->dev_name, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid flush device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.flush_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.flush_dev(
+			ctx, cmd);
+	} else {
+		CAM_WARN(CAM_CORE, "No flush device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_config_dev(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	if (!ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid config device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.config_dev) {
+		rc = ctx->state_machine[ctx->state].ioctl_ops.config_dev(
+			ctx, cmd);
+	} else {
+		CAM_ERR(CAM_CORE, "No config device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+		rc = -EPROTO;
+	}
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_start_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx || !ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid start device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.start_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.start_dev(
+			ctx, cmd);
+	else
+		/* start device can be optional for some driver */
+		CAM_DBG(CAM_CORE, "No start device in dev %d, state %d",
+			ctx->dev_hdl, ctx->state);
+
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx || !ctx->state_machine) {
+		CAM_ERR(CAM_CORE, "Context is not ready");
+		return -EINVAL;
+	}
+
+	if (!cmd) {
+		CAM_ERR(CAM_CORE, "Invalid stop device command payload");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx->ctx_mutex);
+	if (ctx->state_machine[ctx->state].ioctl_ops.stop_dev)
+		rc = ctx->state_machine[ctx->state].ioctl_ops.stop_dev(
+			ctx, cmd);
+	else
+		/* stop device can be optional for some driver */
+		CAM_WARN(CAM_CORE, "No stop device in dev %d, name %s, state %d",
+			ctx->dev_hdl, ctx->dev_name, ctx->state);
+
+	ctx->last_flush_req = 0;
+	mutex_unlock(&ctx->ctx_mutex);
+
+	return rc;
+}
+
+int cam_context_init(struct cam_context *ctx,
+	const char *dev_name,
+	uint64_t dev_id,
+	uint32_t ctx_id,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_ctx_request *req_list,
+	uint32_t req_size)
+{
+	int i;
+
+	/* crm_node_intf is optional */
+	if (!ctx || !hw_mgr_intf || !req_list) {
+		CAM_ERR(CAM_CORE, "Invalid input parameters");
+		return -EINVAL;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+	ctx->dev_hdl = -1;
+	ctx->link_hdl = -1;
+	ctx->session_hdl = -1;
+	INIT_LIST_HEAD(&ctx->list);
+	mutex_init(&ctx->ctx_mutex);
+	mutex_init(&ctx->sync_mutex);
+	spin_lock_init(&ctx->lock);
+
+	ctx->dev_name = dev_name;
+	ctx->dev_id = dev_id;
+	ctx->ctx_id = ctx_id;
+	ctx->ctx_crm_intf = NULL;
+	ctx->crm_ctx_intf = crm_node_intf;
+	ctx->hw_mgr_intf = hw_mgr_intf;
+	ctx->irq_cb_intf = cam_context_handle_hw_event;
+
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	INIT_LIST_HEAD(&ctx->wait_req_list);
+	INIT_LIST_HEAD(&ctx->pending_req_list);
+	INIT_LIST_HEAD(&ctx->free_req_list);
+	ctx->req_list = req_list;
+	ctx->req_size = req_size;
+	for (i = 0; i < req_size; i++) {
+		INIT_LIST_HEAD(&ctx->req_list[i].list);
+		list_add_tail(&ctx->req_list[i].list, &ctx->free_req_list);
+		ctx->req_list[i].ctx = ctx;
+	}
+	ctx->state = CAM_CTX_AVAILABLE;
+	ctx->state_machine = NULL;
+	ctx->ctx_priv = NULL;
+
+	return 0;
+}
+
+int cam_context_deinit(struct cam_context *ctx)
+{
+	if (!ctx)
+		return -EINVAL;
+
+	/*
+	 * This is called from platform device remove.
+	 * Everything should be released at this moment,
+	 * so we just free the memory for the context.
+	 */
+	if (ctx->state != CAM_CTX_AVAILABLE)
+		CAM_ERR(CAM_CORE, "Device did not shutdown cleanly");
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
+
+void cam_context_putref(struct cam_context *ctx)
+{
+	kref_put(&ctx->refcount, cam_node_put_ctxt_to_free_list);
+	CAM_DBG(CAM_CORE,
+		"ctx device hdl %ld, ref count %d, dev_name %s",
+		ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)),
+		ctx->dev_name);
+}
+
+void cam_context_getref(struct cam_context *ctx)
+{
+	if (kref_get_unless_zero(&ctx->refcount) == 0) {
+		/* should never happen */
+		WARN(1, "%s fail\n", __func__);
+	}
+	CAM_DBG(CAM_CORE,
+		"ctx device hdl %ld, ref count %d, dev_name %s",
+		ctx->dev_hdl, refcount_read(&(ctx->refcount.refcount)),
+		ctx->dev_name);
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context.h b/drivers/media/platform/msm/camera/cam_core/cam_context.h
new file mode 100644
index 0000000..3697bc6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context.h
@@ -0,0 +1,460 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CONTEXT_H_
+#define _CAM_CONTEXT_H_
+
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/kref.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_hw_mgr_intf.h"
+
+/* Forward declarations */
+struct cam_context;
+
+/* max request number */
+#define CAM_CTX_REQ_MAX              20
+#define CAM_CTX_CFG_MAX              20
+#define CAM_CTX_RES_MAX              20
+
+/**
+ * enum cam_context_state - context top level states
+ *
+ */
+enum cam_context_state {
+	CAM_CTX_UNINIT               = 0,
+	CAM_CTX_AVAILABLE            = 1,
+	CAM_CTX_ACQUIRED             = 2,
+	CAM_CTX_READY                = 3,
+	CAM_CTX_ACTIVATED            = 4,
+	CAM_CTX_STATE_MAX            = 5,
+};
+
+/**
+ * struct cam_ctx_request - Common request structure for the context
+ *
+ * @list:                  Link list entry
+ * @status:                Request status
+ * @request_id:            Request id
+ * @req_priv:              Derived request object
+ * @hw_update_entries:     Hardware update entries
+ * @num_hw_update_entries: Number of hardware update entries
+ * @in_map_entries:        Entries for in fences
+ * @num_in_map_entries:    Number of in map entries
+ * @out_map_entries:       Entries for out fences
+ * @num_out_map_entries:   Number of out map entries
+ * @num_in_acked:          Number of in fence acked
+ * @num_out_acked:         Number of out fence acked
+ * @flushed:               Request is flushed
+ * @ctx:                   The context to which this request belongs
+ * @pf_data:               Page fault debug data
+ *
+ */
+struct cam_ctx_request {
+	struct list_head               list;
+	uint32_t                       status;
+	uint64_t                       request_id;
+	void                          *req_priv;
+	struct cam_hw_update_entry     hw_update_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_hw_update_entries;
+	struct cam_hw_fence_map_entry  in_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_in_map_entries;
+	struct cam_hw_fence_map_entry  out_map_entries[CAM_CTX_CFG_MAX];
+	uint32_t                       num_out_map_entries;
+	atomic_t                       num_in_acked;
+	uint32_t                       num_out_acked;
+	int                            flushed;
+	struct cam_context            *ctx;
+	struct cam_hw_mgr_dump_pf_data pf_data;
+};
+
+/**
+ * struct cam_ctx_ioctl_ops - Function table for handling IOCTL calls
+ *
+ * @acquire_dev:           Function pointer for acquire device
+ * @release_dev:           Function pointer for release device
+ * @config_dev:            Function pointer for config device
+ * @start_dev:             Function pointer for start device
+ * @stop_dev:              Function pointer for stop device
+ * @flush_dev:             Function pointer for flush device
+ * @acquire_hw:            Function pointer for acquire hw
+ * @release_hw:            Function pointer for release hw
+ *
+ */
+struct cam_ctx_ioctl_ops {
+	int (*acquire_dev)(struct cam_context *ctx,
+			struct cam_acquire_dev_cmd *cmd);
+	int (*release_dev)(struct cam_context *ctx,
+			struct cam_release_dev_cmd *cmd);
+	int (*config_dev)(struct cam_context *ctx,
+			struct cam_config_dev_cmd *cmd);
+	int (*start_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+	int (*stop_dev)(struct cam_context *ctx,
+			struct cam_start_stop_dev_cmd *cmd);
+	int (*flush_dev)(struct cam_context *ctx,
+			struct cam_flush_dev_cmd *cmd);
+	int (*acquire_hw)(struct cam_context *ctx, void *args);
+	int (*release_hw)(struct cam_context *ctx, void *args);
+};
+
+/**
+ * struct cam_ctx_crm_ops -  Function table for handling CRM to context calls
+ *
+ * @get_dev_info:          Get device information
+ * @link:                  Link the context
+ * @unlink:                Unlink the context
+ * @apply_req:             Apply setting for the context
+ * @flush_req:             Flush request to remove request ids
+ * @process_evt:           Handle event notification from CRM (optional)
+ *
+ */
+struct cam_ctx_crm_ops {
+	int (*get_dev_info)(struct cam_context *ctx,
+			struct cam_req_mgr_device_info *device_info);
+	int (*link)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *link);
+	int (*unlink)(struct cam_context *ctx,
+			struct cam_req_mgr_core_dev_link_setup *unlink);
+	int (*apply_req)(struct cam_context *ctx,
+			struct cam_req_mgr_apply_request *apply);
+	int (*flush_req)(struct cam_context *ctx,
+			struct cam_req_mgr_flush_request *flush);
+	int (*process_evt)(struct cam_context *ctx,
+			struct cam_req_mgr_link_evt_data *evt_data);
+};
+
+/**
+ * struct cam_ctx_ops - Collection of the interface function tables
+ *
+ * @ioctl_ops:             Ioctl function table
+ * @crm_ops:               CRM to context interface function table
+ * @irq_ops:               Hardware event handle function
+ * @pagefault_ops:         Function to be called on page fault
+ *
+ */
+struct cam_ctx_ops {
+	struct cam_ctx_ioctl_ops     ioctl_ops;
+	struct cam_ctx_crm_ops       crm_ops;
+	cam_hw_event_cb_func         irq_ops;
+	cam_hw_pagefault_cb_func     pagefault_ops;
+};
+
+/**
+ * struct cam_context - camera context object for the subdevice node
+ *
+ * @dev_name:              Name of the associated device
+ * @dev_id:                ID of the associated device
+ * @ctx_id:                ID for this context
+ * @list:                  Link list entry
+ * @session_hdl:           Session handle
+ * @dev_hdl:               Device handle
+ * @link_hdl:              Link handle
+ * @ctx_mutex:             Mutex for ioctl calls
+ * @lock:                  Spin lock
+ * @active_req_list:       Requests pending for done event
+ * @pending_req_list:      Requests pending for reg upd event
+ * @wait_req_list:         Requests waiting for apply
+ * @free_req_list:         Requests that are free
+ * @req_list:              Reference to the request storage
+ * @req_size:              Size of the request storage
+ * @hw_mgr_intf:           Context to HW interface
+ * @ctx_crm_intf:          Context to CRM interface
+ * @crm_ctx_intf:          CRM to context interface
+ * @irq_cb_intf:           HW to context callback interface
+ * @state:                 Current state for top level state machine
+ * @state_machine:         Top level state machine
+ * @ctx_priv:              Private context pointer
+ * @ctxt_to_hw_map:        Context to hardware mapping pointer
+ * @refcount:              Context object refcount
+ * @node:                  The main node to which this context belongs
+ * @sync_mutex:            Mutex to synchronize with the sync callback thread
+ * @last_flush_req:        Last request to flush
+ *
+ */
+struct cam_context {
+	const char                  *dev_name;
+	uint64_t                     dev_id;
+	uint32_t                     ctx_id;
+	struct list_head             list;
+	int32_t                      session_hdl;
+	int32_t                      dev_hdl;
+	int32_t                      link_hdl;
+
+	struct mutex                 ctx_mutex;
+	spinlock_t                   lock;
+
+	struct list_head             active_req_list;
+	struct list_head             pending_req_list;
+	struct list_head             wait_req_list;
+	struct list_head             free_req_list;
+	struct cam_ctx_request      *req_list;
+	uint32_t                     req_size;
+
+	struct cam_hw_mgr_intf      *hw_mgr_intf;
+	struct cam_req_mgr_crm_cb   *ctx_crm_intf;
+	struct cam_req_mgr_kmd_ops  *crm_ctx_intf;
+	cam_hw_event_cb_func         irq_cb_intf;
+
+	enum cam_context_state       state;
+	struct cam_ctx_ops          *state_machine;
+
+	void                        *ctx_priv;
+	void                        *ctxt_to_hw_map;
+
+	struct kref                  refcount;
+	void                        *node;
+	struct mutex                 sync_mutex;
+	uint32_t                     last_flush_req;
+};
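+
+/*
+ * Request lifecycle (added for clarity; inferred from cam_context_utils.c):
+ * a request starts on free_req_list, sits on pending_req_list while its
+ * input fences are outstanding, moves to active_req_list once submitted to
+ * the HW manager, and returns to free_req_list on buf done or flush. The
+ * lists are guarded by @lock; @sync_mutex serializes submission against
+ * flush.
+ */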
+
+/**
+ * cam_context_shutdown()
+ *
+ * @brief:        Called during device close or shutdown
+ *
+ * @ctx:          Object pointer for cam_context
+ *
+ */
+int cam_context_shutdown(struct cam_context *ctx);
+
+/**
+ * cam_context_handle_crm_get_dev_info()
+ *
+ * @brief:        Handle get device information command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @info:         Device information returned
+ *
+ */
+int cam_context_handle_crm_get_dev_info(struct cam_context *ctx,
+		struct cam_req_mgr_device_info *info);
+
+/**
+ * cam_context_handle_crm_link()
+ *
+ * @brief:        Handle link command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @link:         Link command payload
+ *
+ */
+int cam_context_handle_crm_link(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * cam_context_handle_crm_unlink()
+ *
+ * @brief:        Handle unlink command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @unlink:       Unlink command payload
+ *
+ */
+int cam_context_handle_crm_unlink(struct cam_context *ctx,
+		struct cam_req_mgr_core_dev_link_setup *unlink);
+
+/**
+ * cam_context_handle_crm_apply_req()
+ *
+ * @brief:        Handle apply request command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @apply:        Apply request command payload
+ *
+ */
+int cam_context_handle_crm_apply_req(struct cam_context *ctx,
+		struct cam_req_mgr_apply_request *apply);
+
+/**
+ * cam_context_handle_crm_flush_req()
+ *
+ * @brief:        Handle flush request command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @flush:        Flush request command payload
+ *
+ */
+int cam_context_handle_crm_flush_req(struct cam_context *ctx,
+		struct cam_req_mgr_flush_request *flush);
+
+/**
+ * cam_context_handle_crm_process_evt()
+ *
+ * @brief:        Handle process event command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @process_evt:  process event command payload
+ *
+ */
+int cam_context_handle_crm_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *process_evt);
+
+/**
+ * cam_context_dump_pf_info()
+ *
+ * @brief:        Handle dump active request command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @iova:         Page fault address
+ * @buf_info:     Information about closest memory handle
+ *
+ */
+int cam_context_dump_pf_info(struct cam_context *ctx, unsigned long iova,
+	uint32_t buf_info);
+
+/**
+ * cam_context_handle_acquire_dev()
+ *
+ * @brief:        Handle acquire device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Acquire device command payload
+ *
+ */
+int cam_context_handle_acquire_dev(struct cam_context *ctx,
+		struct cam_acquire_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_acquire_hw()
+ *
+ * @brief:        Handle acquire HW command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Acquire HW command payload
+ *
+ */
+int cam_context_handle_acquire_hw(struct cam_context *ctx,
+		void *cmd);
+
+/**
+ * cam_context_handle_release_dev()
+ *
+ * @brief:        Handle release device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Release device command payload
+ *
+ */
+int cam_context_handle_release_dev(struct cam_context *ctx,
+		struct cam_release_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_release_hw()
+ *
+ * @brief:        Handle release HW command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Release HW command payload
+ *
+ */
+int cam_context_handle_release_hw(struct cam_context *ctx,
+		void *cmd);
+
+/**
+ * cam_context_handle_config_dev()
+ *
+ * @brief:        Handle config device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Config device command payload
+ *
+ */
+int cam_context_handle_config_dev(struct cam_context *ctx,
+		struct cam_config_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_flush_dev()
+ *
+ * @brief:        Handle flush device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Flush device command payload
+ *
+ */
+int cam_context_handle_flush_dev(struct cam_context *ctx,
+		struct cam_flush_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_start_dev()
+ *
+ * @brief:        Handle start device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Start device command payload
+ *
+ */
+int cam_context_handle_start_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_handle_stop_dev()
+ *
+ * @brief:        Handle stop device command
+ *
+ * @ctx:          Object pointer for cam_context
+ * @cmd:          Stop device command payload
+ *
+ */
+int cam_context_handle_stop_dev(struct cam_context *ctx,
+		struct cam_start_stop_dev_cmd *cmd);
+
+/**
+ * cam_context_deinit()
+ *
+ * @brief:        Camera context deinitialize function
+ *
+ * @ctx:          Object pointer for cam_context
+ *
+ */
+int cam_context_deinit(struct cam_context *ctx);
+
+/**
+ * cam_context_init()
+ *
+ * @brief:        Camera context initialize function
+ *
+ * @ctx:                   Object pointer for cam_context
+ * @dev_name:              Name of the associated device
+ * @dev_id:                ID of the associated device
+ * @ctx_id:                ID for this context
+ * @crm_node_intf:         Function table for crm to context interface
+ * @hw_mgr_intf:           Function table for context to hw interface
+ * @req_list:              Request storage
+ * @req_size:              Size of the request storage
+ *
+ */
+int cam_context_init(struct cam_context *ctx,
+		const char *dev_name,
+		uint64_t dev_id,
+		uint32_t ctx_id,
+		struct cam_req_mgr_kmd_ops *crm_node_intf,
+		struct cam_hw_mgr_intf *hw_mgr_intf,
+		struct cam_ctx_request *req_list,
+		uint32_t req_size);
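+
+/*
+ * Illustrative only (not part of the original change): a minimal sketch of a
+ * derived context wiring in caller-owned request storage. Names prefixed
+ * "my_" and MY_DEV_ID are hypothetical.
+ *
+ *	static struct cam_ctx_request my_reqs[CAM_CTX_REQ_MAX];
+ *
+ *	rc = cam_context_init(&my_ctx, "my-dev", MY_DEV_ID, 0,
+ *			crm_node_intf, hw_mgr_intf,
+ *			my_reqs, CAM_CTX_REQ_MAX);
+ */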
+
+/**
+ * cam_context_putref()
+ *
+ * @brief:       Put back context reference.
+ *
+ * @ctx:                  Context for which ref is returned
+ *
+ */
+void cam_context_putref(struct cam_context *ctx);
+
+/**
+ * cam_context_getref()
+ *
+ * @brief:       Take a context reference.
+ *
+ * @ctx:                  Context for which ref is taken
+ *
+ */
+void cam_context_getref(struct cam_context *ctx);
+
+#endif  /* _CAM_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
new file mode 100644
index 0000000..2d6b91a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.c
@@ -0,0 +1,948 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/cam_sync.h>
+#include <media/cam_defs.h>
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_mem_mgr.h"
+#include "cam_node.h"
+#include "cam_req_mgr_util.h"
+#include "cam_sync_api.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+
+static uint cam_debug_ctx_req_list;
+module_param(cam_debug_ctx_req_list, uint, 0644);
+
+static inline int cam_context_validate_thread(void)
+{
+	if (in_interrupt()) {
+		WARN(1, "Invalid execution context\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+	void *done_event_data, uint32_t bubble_state)
+{
+	int j;
+	int result;
+	struct cam_ctx_request *req;
+	struct cam_hw_done_event_data *done =
+		(struct cam_hw_done_event_data *)done_event_data;
+	int rc;
+
+	if (!ctx || !done) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, done);
+		return -EINVAL;
+	}
+
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
+
+	spin_lock(&ctx->lock);
+	if (list_empty(&ctx->active_req_list)) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] no active request",
+			ctx->dev_name, ctx->ctx_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+	req = list_first_entry(&ctx->active_req_list,
+		struct cam_ctx_request, list);
+
+	trace_cam_buf_done("UTILS", ctx, req);
+
+	if (done->request_id != req->request_id) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] mismatch: done req[%lld], active req[%lld]",
+			ctx->dev_name, ctx->ctx_id,
+			done->request_id, req->request_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+
+	if (!req->num_out_map_entries) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] no output fence to signal",
+			ctx->dev_name, ctx->ctx_id);
+		spin_unlock(&ctx->lock);
+		return -EIO;
+	}
+
+	/*
+	 * since another thread may be adding/removing from active
+	 * list, so hold the lock
+	 */
+	list_del_init(&req->list);
+	spin_unlock(&ctx->lock);
+	if (!bubble_state) {
+		result = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+	} else {
+		CAM_DBG(CAM_REQ,
+			"[%s][ctx_id %d] : req[%llu] is done with error",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		for (j = 0; j < req->num_out_map_entries; j++)
+			CAM_DBG(CAM_REQ, "fence %d signaled with error",
+				req->out_map_entries[j].sync_id);
+
+		result = CAM_SYNC_STATE_SIGNALED_ERROR;
+	}
+
+	for (j = 0; j < req->num_out_map_entries; j++) {
+		cam_sync_signal(req->out_map_entries[j].sync_id, result);
+		req->out_map_entries[j].sync_id = -1;
+	}
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving req[%llu] from active_list to free_list",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+	/*
+	 * another thread may be adding/removing from free list,
+	 * so hold the lock
+	 */
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
+	spin_unlock(&ctx->lock);
+
+	return 0;
+}
+
+static int cam_context_apply_req_to_hw(struct cam_ctx_request *req,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_context *ctx = req->ctx;
+	struct cam_hw_config_args cfg;
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	spin_lock(&ctx->lock);
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving req[%llu] from pending_list to active_list",
+			ctx->dev_name, ctx->ctx_id, req->request_id);
+
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.request_id = req->request_id;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.num_out_map_entries = req->num_out_map_entries;
+	cfg.priv = req->req_priv;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		spin_lock(&ctx->lock);
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		spin_unlock(&ctx->lock);
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from active_list to free_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+
+end:
+	return rc;
+}
+
+static void cam_context_sync_callback(int32_t sync_obj, int status, void *data)
+{
+	struct cam_ctx_request *req = data;
+	struct cam_context *ctx = NULL;
+	struct cam_flush_dev_cmd flush_cmd;
+	struct cam_req_mgr_apply_request apply;
+	int rc;
+
+	if (!req) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		return;
+	}
+	rc = cam_context_validate_thread();
+	if (rc)
+		return;
+
+	ctx = req->ctx;
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid ctx for req %llu", req->request_id);
+		return;
+	}
+
+	if (atomic_inc_return(&req->num_in_acked) == req->num_in_map_entries) {
+		apply.request_id = req->request_id;
+		/*
+		 * take mutex to ensure that another thread does
+		 * not flush the request while this
+		 * thread is submitting it to h/w. The submit to
+		 * h/w and adding to the active list should happen
+		 * in a critical section which is provided by this
+		 * mutex.
+		 */
+		if (status == CAM_SYNC_STATE_SIGNALED_ERROR) {
+			CAM_DBG(CAM_CTXT, "fence error: %d", sync_obj);
+			flush_cmd.req_id = req->request_id;
+			cam_context_flush_req_to_hw(ctx, &flush_cmd);
+		}
+
+		mutex_lock(&ctx->sync_mutex);
+		if (!req->flushed) {
+			cam_context_apply_req_to_hw(req, &apply);
+			mutex_unlock(&ctx->sync_mutex);
+		} else {
+			req->flushed = 0;
+			req->ctx = NULL;
+			mutex_unlock(&ctx->sync_mutex);
+			spin_lock(&ctx->lock);
+			list_del_init(&req->list);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+
+			if (cam_debug_ctx_req_list & ctx->dev_id)
+				CAM_INFO(CAM_CTXT,
+					"[%s][%d] : Moving req[%llu] from pending_list to free_list",
+					ctx->dev_name, ctx->ctx_id,
+					req->request_id);
+		}
+	}
+	cam_context_putref(ctx);
+}
+
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	struct cam_hw_release_args arg;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		return -EINVAL;
+	}
+
+	if ((!ctx->hw_mgr_intf) || (!ctx->hw_mgr_intf->hw_release)) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		return -EINVAL;
+	}
+
+	arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	arg.active_req = false;
+
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &arg);
+	ctx->ctxt_to_hw_map = NULL;
+
+	ctx->session_hdl = -1;
+	ctx->dev_hdl = -1;
+	ctx->link_hdl = -1;
+
+	return 0;
+}
+
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_ctx_request *req = NULL;
+	struct cam_hw_prepare_update_args cfg;
+	uintptr_t packet_addr;
+	struct cam_packet *packet;
+	size_t len = 0;
+	int32_t i = 0, j = 0;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+	rc = cam_context_validate_thread();
+	if (rc)
+		return rc;
+
+	spin_lock(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock(&ctx->lock);
+
+	if (!req) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] No more request obj free",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	memset(req, 0, sizeof(*req));
+	INIT_LIST_HEAD(&req->list);
+	req->ctx = ctx;
+
+	/*
+	 * For config dev, only memory handles are supported, so map the
+	 * packet from the memory handle.
+	 */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not get packet address",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	packet = (struct cam_packet *) ((uint8_t *)packet_addr +
+		(uint32_t)cmd->offset);
+
+	if (packet->header.request_id <= ctx->last_flush_req) {
+		CAM_DBG(CAM_CORE,
+			"request %lld has been flushed, reject packet",
+			packet->header.request_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	if (packet->header.request_id > ctx->last_flush_req)
+		ctx->last_flush_req = 0;
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	cfg.max_hw_update_entries = CAM_CTX_CFG_MAX;
+	cfg.num_hw_update_entries = req->num_hw_update_entries;
+	cfg.hw_update_entries = req->hw_update_entries;
+	cfg.max_out_map_entries = CAM_CTX_CFG_MAX;
+	cfg.out_map_entries = req->out_map_entries;
+	cfg.max_in_map_entries = CAM_CTX_CFG_MAX;
+	cfg.in_map_entries = req->in_map_entries;
+	cfg.pf_data = &(req->pf_data);
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] Prepare config packet failed in HW layer",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req->num_hw_update_entries = cfg.num_hw_update_entries;
+	req->num_out_map_entries = cfg.num_out_map_entries;
+	req->num_in_map_entries = cfg.num_in_map_entries;
+	atomic_set(&req->num_in_acked, 0);
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+	req->req_priv = cfg.priv;
+
+	for (i = 0; i < req->num_out_map_entries; i++) {
+		rc = cam_sync_get_obj_ref(req->out_map_entries[i].sync_id);
+		if (rc) {
+			CAM_ERR(CAM_CTXT, "Can't get ref for sync %d",
+				req->out_map_entries[i].sync_id);
+			goto put_ref;
+		}
+	}
+
+	if (req->num_in_map_entries > 0) {
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->pending_req_list);
+		spin_unlock(&ctx->lock);
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from free_list to pending_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		for (j = 0; j < req->num_in_map_entries; j++) {
+			cam_context_getref(ctx);
+			rc = cam_sync_register_callback(
+					cam_context_sync_callback,
+					(void *)req,
+					req->in_map_entries[j].sync_id);
+			if (rc) {
+				CAM_ERR(CAM_CTXT,
+					"[%s][%d] Failed register fence cb: %d ret = %d",
+					ctx->dev_name, ctx->ctx_id,
+					req->in_map_entries[j].sync_id, rc);
+				spin_lock(&ctx->lock);
+				list_del_init(&req->list);
+				spin_unlock(&ctx->lock);
+
+				if (cam_debug_ctx_req_list & ctx->dev_id)
+					CAM_INFO(CAM_CTXT,
+						"[%s][%d] : Moving req[%llu] from pending_list to free_list",
+						ctx->dev_name, ctx->ctx_id,
+						req->request_id);
+
+				cam_context_putref(ctx);
+
+				goto put_ref;
+			}
+			CAM_DBG(CAM_CTXT, "register in fence cb: %d ret = %d",
+				req->in_map_entries[j].sync_id, rc);
+		}
+		goto end;
+	}
+
+	return rc;
+
+put_ref:
+	for (--i; i >= 0; i--) {
+		rc = cam_sync_put_obj_ref(req->out_map_entries[i].sync_id);
+		if (rc)
+			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
+				req->out_map_entries[i].sync_id);
+	}
+free_req:
+	spin_lock(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	req->ctx = NULL;
+	spin_unlock(&ctx->lock);
+end:
+	return rc;
+}
+
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+	struct cam_hw_acquire_args param;
+	struct cam_create_dev_hdl req_hdl_param;
+	struct cam_hw_release_args release;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_CTXT, "ses hdl: %x, num_res: %d, type: %d, res: %lld",
+		cmd->session_handle, cmd->num_resources, cmd->handle_type,
+		cmd->resource_hdl);
+
+	if (cmd->num_resources > CAM_CTX_RES_MAX) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] resource limit exceeded",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Only user pointer is supported",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* fill in parameters */
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = cmd->resource_hdl;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] Acquire device failed",
+			ctx->dev_name, ctx->ctx_id);
+		goto end;
+	}
+
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	/* if hw resource acquire successful, acquire dev handle */
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* bridge is not ready for these flags. so false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.priv = ctx;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		CAM_ERR(CAM_CTXT, "[%s][%d] Can not create device handle",
+			ctx->dev_name, ctx->ctx_id);
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx->ctxt_to_hw_map = NULL;
+	ctx->dev_hdl = -1;
+end:
+	return rc;
+}
+
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx)
+{
+	struct cam_hw_flush_args flush_args;
+	struct list_head temp_list;
+	struct cam_ctx_request *req;
+	uint32_t i;
+	int rc = 0;
+	bool free_req;
+
+	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush ctx", ctx->dev_name);
+	memset(&flush_args, 0, sizeof(flush_args));
+
+	/*
+	 * flush pending requests, take the sync lock to synchronize with the
+	 * sync callback thread so that the sync cb thread does not try to
+	 * submit request to h/w while the request is being flushed
+	 */
+	mutex_lock(&ctx->sync_mutex);
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->pending_req_list, &temp_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving all pending requests from pending_list to temp_list",
+			ctx->dev_name, ctx->ctx_id);
+
+	flush_args.num_req_pending = 0;
+	while (true) {
+		spin_lock(&ctx->lock);
+		if (list_empty(&temp_list)) {
+			spin_unlock(&ctx->lock);
+			break;
+		}
+
+		req = list_first_entry(&temp_list,
+				struct cam_ctx_request, list);
+
+		list_del_init(&req->list);
+		spin_unlock(&ctx->lock);
+		req->flushed = 1;
+
+		flush_args.flush_req_pending[flush_args.num_req_pending++] =
+			req->req_priv;
+
+		free_req = false;
+		for (i = 0; i < req->num_in_map_entries; i++) {
+			rc = cam_sync_deregister_callback(
+				cam_context_sync_callback,
+				(void *)req,
+				req->in_map_entries[i].sync_id);
+			if (!rc) {
+				cam_context_putref(ctx);
+				if (atomic_inc_return(&req->num_in_acked) ==
+					req->num_in_map_entries)
+					free_req = true;
+			}
+		}
+
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id != -1) {
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+					"Req: %llu already signalled, sync_id:%d",
+					req->request_id,
+					req->out_map_entries[i].sync_id);
+					break;
+				}
+			}
+		}
+
+		/*
+		 * If we have deregistered the last sync callback, req will
+		 * not be put on the free list. So put it on the free list here
+		 */
+		if (free_req) {
+			req->ctx = NULL;
+			spin_lock(&ctx->lock);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			spin_unlock(&ctx->lock);
+		}
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Deleting req[%llu] from temp_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+	mutex_unlock(&ctx->sync_mutex);
+
+	if (ctx->hw_mgr_intf->hw_flush) {
+		flush_args.num_req_active = 0;
+		spin_lock(&ctx->lock);
+		list_for_each_entry(req, &ctx->active_req_list, list) {
+			flush_args.flush_req_active[flush_args.num_req_active++]
+				= req->req_priv;
+		}
+		spin_unlock(&ctx->lock);
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+			flush_args.flush_type = CAM_FLUSH_TYPE_ALL;
+			ctx->hw_mgr_intf->hw_flush(
+				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+		}
+	}
+
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock(&ctx->lock);
+	list_splice_init(&ctx->active_req_list, &temp_list);
+	INIT_LIST_HEAD(&ctx->active_req_list);
+	spin_unlock(&ctx->lock);
+
+	if (cam_debug_ctx_req_list & ctx->dev_id)
+		CAM_INFO(CAM_CTXT,
+			"[%s][%d] : Moving all requests from active_list to temp_list",
+			ctx->dev_name, ctx->ctx_id);
+
+	while (true) {
+		spin_lock(&ctx->lock);
+		if (list_empty(&temp_list)) {
+			spin_unlock(&ctx->lock);
+			break;
+		}
+		req = list_first_entry(&temp_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		spin_unlock(&ctx->lock);
+
+		for (i = 0; i < req->num_out_map_entries; i++) {
+			if (req->out_map_entries[i].sync_id != -1) {
+				rc = cam_sync_signal(
+					req->out_map_entries[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc == -EALREADY) {
+					CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled ctx: %pK dev_name: %s dev_handle: %d ctx_state: %d",
+						req->request_id, req->ctx,
+						req->ctx->dev_name,
+						req->ctx->dev_hdl,
+						req->ctx->state);
+					break;
+				}
+			}
+		}
+
+		spin_lock(&ctx->lock);
+		list_add_tail(&req->list, &ctx->free_req_list);
+		spin_unlock(&ctx->lock);
+		req->ctx = NULL;
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Moving req[%llu] from temp_list to free_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+	}
+
+	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush ctx", ctx->dev_name);
+
+	return 0;
+}
+
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	struct cam_ctx_request *req = NULL;
+	struct cam_hw_flush_args flush_args;
+	uint32_t i;
+	int32_t sync_id = 0;
+	int rc = 0;
+	bool free_req = false;
+
+	CAM_DBG(CAM_CTXT, "[%s] E: NRT flush req", ctx->dev_name);
+
+	memset(&flush_args, 0, sizeof(flush_args));
+	flush_args.num_req_pending = 0;
+	flush_args.num_req_active = 0;
+	mutex_lock(&ctx->sync_mutex);
+	spin_lock(&ctx->lock);
+	list_for_each_entry(req, &ctx->pending_req_list, list) {
+		if (req->request_id != cmd->req_id)
+			continue;
+
+		if (cam_debug_ctx_req_list & ctx->dev_id)
+			CAM_INFO(CAM_CTXT,
+				"[%s][%d] : Deleting req[%llu] from pending_list",
+				ctx->dev_name, ctx->ctx_id, req->request_id);
+
+		list_del_init(&req->list);
+		req->flushed = 1;
+
+		flush_args.flush_req_pending[flush_args.num_req_pending++] =
+			req->req_priv;
+		break;
+	}
+	spin_unlock(&ctx->lock);
+	mutex_unlock(&ctx->sync_mutex);
+
+	if (ctx->hw_mgr_intf->hw_flush) {
+		if (!flush_args.num_req_pending) {
+			spin_lock(&ctx->lock);
+			list_for_each_entry(req, &ctx->active_req_list, list) {
+				if (req->request_id != cmd->req_id)
+					continue;
+
+				list_del_init(&req->list);
+
+				flush_args.flush_req_active[
+					flush_args.num_req_active++] =
+					req->req_priv;
+				break;
+			}
+			spin_unlock(&ctx->lock);
+		}
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			flush_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+			flush_args.flush_type = CAM_FLUSH_TYPE_REQ;
+			ctx->hw_mgr_intf->hw_flush(
+				ctx->hw_mgr_intf->hw_mgr_priv, &flush_args);
+		}
+	}
+
+	if (req) {
+		if (flush_args.num_req_pending) {
+			for (i = 0; i < req->num_in_map_entries; i++) {
+				rc = cam_sync_deregister_callback(
+					cam_context_sync_callback,
+					(void *)req,
+					req->in_map_entries[i].sync_id);
+				if (rc)
+					continue;
+
+				cam_context_putref(ctx);
+				if (atomic_inc_return(&req->num_in_acked) ==
+					req->num_in_map_entries)
+					free_req = true;
+			}
+		}
+
+		if (flush_args.num_req_pending || flush_args.num_req_active) {
+			for (i = 0; i < req->num_out_map_entries; i++) {
+				sync_id =
+					req->out_map_entries[i].sync_id;
+				if (sync_id != -1) {
+					rc = cam_sync_signal(sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+					if (rc == -EALREADY) {
+						CAM_ERR(CAM_CTXT,
+						"Req: %llu already signalled, sync_id:%d",
+						req->request_id, sync_id);
+						break;
+					}
+				}
+			}
+			if (flush_args.num_req_active || free_req) {
+				req->ctx = NULL;
+				spin_lock(&ctx->lock);
+				list_add_tail(&req->list, &ctx->free_req_list);
+				spin_unlock(&ctx->lock);
+
+				if (cam_debug_ctx_req_list & ctx->dev_id)
+					CAM_INFO(CAM_CTXT,
+						"[%s][%d] : Moving req[%llu] from %s to free_list",
+						ctx->dev_name, ctx->ctx_id,
+						req->request_id,
+						flush_args.num_req_active ?
+							"active_list" :
+							"pending_list");
+			}
+		}
+	}
+	CAM_DBG(CAM_CTXT, "[%s] X: NRT flush req", ctx->dev_name);
+
+	return 0;
+}
+
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (cmd->flush_type == CAM_FLUSH_TYPE_ALL) {
+		ctx->last_flush_req = cmd->req_id;
+		rc = cam_context_flush_ctx_to_hw(ctx);
+	} else if (cmd->flush_type == CAM_FLUSH_TYPE_REQ) {
+		rc = cam_context_flush_req_to_hw(ctx, cmd);
+	} else {
+		rc = -EINVAL;
+		CAM_ERR(CAM_CORE, "[%s][%d] Invalid flush type %d",
+			ctx->dev_name, ctx->ctx_id, cmd->flush_type);
+	}
+
+end:
+	return rc;
+}
+
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_start_args arg;
+
+	if (!ctx || !cmd) {
+		CAM_ERR(CAM_CTXT, "Invalid input params %pK %pK", ctx, cmd);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if ((cmd->session_handle != ctx->session_hdl) ||
+		(cmd->dev_handle != ctx->dev_hdl)) {
+		CAM_ERR(CAM_CTXT,
+			"[%s][%d] Invalid session hdl[%d], dev_handle[%d]",
+			ctx->dev_name, ctx->ctx_id,
+			cmd->session_handle, cmd->dev_handle);
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (ctx->hw_mgr_intf->hw_start) {
+		arg.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+				&arg);
+		if (rc) {
+			/* HW failure. user need to clean up the resource */
+			CAM_ERR(CAM_CTXT, "[%s][%d] Start HW failed",
+				ctx->dev_name, ctx->ctx_id);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_stop_args stop;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input param");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = cam_context_validate_thread();
+	if (rc)
+		goto end;
+
+	rc = cam_context_flush_ctx_to_hw(ctx);
+	if (rc)
+		goto end;
+
+	/* stop hw first */
+	if (ctx->hw_mgr_intf->hw_stop) {
+		stop.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
+	}
+
+end:
+	return rc;
+}
+
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+	bool *mem_found)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args cmd_args;
+
+	if (!ctx) {
+		CAM_ERR(CAM_CTXT, "Invalid input param %pK", ctx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_CTXT, "[%s][%d] HW interface is not ready",
+			ctx->dev_name, ctx->ctx_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (ctx->hw_mgr_intf->hw_cmd) {
+		cmd_args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+		cmd_args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO;
+		cmd_args.u.pf_args.pf_data.packet = packet;
+		cmd_args.u.pf_args.iova = iova;
+		cmd_args.u.pf_args.buf_info = buf_info;
+		cmd_args.u.pf_args.mem_found = mem_found;
+		ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+			&cmd_args);
+	}
+
+end:
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
new file mode 100644
index 0000000..6622047
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_context_utils.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CONTEXT_UTILS_H_
+#define _CAM_CONTEXT_UTILS_H_
+
+#include <linux/types.h>
+
+int cam_context_buf_done_from_hw(struct cam_context *ctx,
+	void *done_event_data, uint32_t bubble_state);
+int32_t cam_context_release_dev_to_hw(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd);
+int32_t cam_context_prepare_dev_to_hw(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd);
+int32_t cam_context_acquire_dev_to_hw(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd);
+int32_t cam_context_start_dev_to_hw(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd);
+int32_t cam_context_stop_dev_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_dev_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_flush_ctx_to_hw(struct cam_context *ctx);
+int32_t cam_context_flush_req_to_hw(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd);
+int32_t cam_context_dump_pf_info_to_hw(struct cam_context *ctx,
+	struct cam_packet *packet, unsigned long iova, uint32_t buf_info,
+	bool *mem_found);
+
+#endif /* _CAM_CONTEXT_UTILS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw.h b/drivers/media/platform/msm/camera/cam_core/cam_hw.h
new file mode 100644
index 0000000..4b66767
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_H_
+#define _CAM_HW_H_
+
+#include "cam_soc_util.h"
+
+/*
+ * This file declares Enums, Structures and APIs to be used as a template
+ * when writing any HW driver in the camera subsystem.
+ */
+
+/* Hardware state enum */
+enum cam_hw_state {
+	CAM_HW_STATE_POWER_DOWN,
+	CAM_HW_STATE_POWER_UP,
+};
+
+/**
+ * struct cam_hw_info - Common hardware information
+ *
+ * @hw_mutex:              Hardware mutex
+ * @hw_lock:               Hardware spinlock
+ * @hw_complete:           Hardware Completion
+ * @open_count:            Count to track the HW enable from the client
+ * @hw_state:              Hardware state
+ * @soc_info:              Platform SOC properties for hardware
+ * @node_info:             Private HW data related to nodes
+ * @core_info:             Private HW data related to core logic
+ *
+ */
+struct cam_hw_info {
+	struct mutex                    hw_mutex;
+	spinlock_t                      hw_lock;
+	struct completion               hw_complete;
+	uint32_t                        open_count;
+	enum cam_hw_state               hw_state;
+	struct cam_hw_soc_info          soc_info;
+	void                           *node_info;
+	void                           *core_info;
+};
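+
+/*
+ * Illustrative only (not part of the original change): a HW driver is
+ * expected to hang its device-specific state off core_info. A minimal
+ * sketch with hypothetical my_* names:
+ *
+ *	struct cam_hw_info *hw_info = &my_hw_info;
+ *
+ *	mutex_init(&hw_info->hw_mutex);
+ *	spin_lock_init(&hw_info->hw_lock);
+ *	init_completion(&hw_info->hw_complete);
+ *	hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+ *	hw_info->core_info = &my_core;
+ */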
+
+#endif /* _CAM_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
new file mode 100644
index 0000000..0ea201d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_intf.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_INTF_H_
+#define _CAM_HW_INTF_H_
+
+#include <linux/types.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW driver and HW Manager.
+ */
+
+/**
+ * struct cam_hw_ops - Hardware layer interface functions
+ *
+ * @get_hw_caps:           Function pointer for get hw caps
+ * @init:                  Function pointer for initialize hardware
+ * @deinit:                Function pointer for deinitialize hardware
+ * @reset:                 Function pointer for reset hardware
+ * @reserve:               Function pointer for reserve hardware
+ * @release:               Function pointer for release hardware
+ * @start:                 Function pointer for start hardware
+ * @stop:                  Function pointer for stop hardware
+ * @read:                  Function pointer for read hardware registers
+ * @write:                 Function pointer for write hardware registers
+ * @process_cmd:           Function pointer for additional hardware controls
+ * @flush:                 Function pointer for flush requests
+ *
+ */
+struct cam_hw_ops {
+	int (*get_hw_caps)(void *hw_priv,
+		void *get_hw_cap_args, uint32_t arg_size);
+	int (*init)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*deinit)(void *hw_priv,
+		void *init_hw_args, uint32_t arg_size);
+	int (*reset)(void *hw_priv,
+		void *reset_core_args, uint32_t arg_size);
+	int (*reserve)(void *hw_priv,
+		void *reserve_args, uint32_t arg_size);
+	int (*release)(void *hw_priv,
+		void *release_args, uint32_t arg_size);
+	int (*start)(void *hw_priv,
+		void *start_args, uint32_t arg_size);
+	int (*stop)(void *hw_priv,
+		void *stop_args, uint32_t arg_size);
+	int (*read)(void *hw_priv,
+		void *read_args, uint32_t arg_size);
+	int (*write)(void *hw_priv,
+		void *write_args, uint32_t arg_size);
+	int (*process_cmd)(void *hw_priv,
+		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+	int (*flush)(void *hw_priv,
+		void *flush_args, uint32_t arg_size);
+};
+
+/**
+ * struct cam_hw_intf - Common hardware node
+ *
+ * @hw_type:               Hardware type
+ * @hw_idx:                Hardware ID
+ * @hw_ops:                Hardware interface function table
+ * @hw_priv:               Private hardware node pointer
+ *
+ */
+struct cam_hw_intf {
+	uint32_t                     hw_type;
+	uint32_t                     hw_idx;
+	struct cam_hw_ops            hw_ops;
+	void                        *hw_priv;
+};
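+
+/*
+ * Illustrative only (not part of the original change): a minimal sketch of a
+ * HW driver populating its interface node; MY_HW_TYPE and my_* names are
+ * hypothetical.
+ *
+ *	struct cam_hw_intf intf = {
+ *		.hw_type = MY_HW_TYPE,
+ *		.hw_idx  = 0,
+ *		.hw_ops  = {
+ *			.init  = my_hw_init,
+ *			.start = my_hw_start,
+ *			.stop  = my_hw_stop,
+ *		},
+ *		.hw_priv = &my_hw_info,
+ *	};
+ */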
+
+#endif /* _CAM_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
new file mode 100644
index 0000000..b889023
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_hw_mgr_intf.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HW_MGR_INTF_H_
+#define _CAM_HW_MGR_INTF_H_
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+/*
+ * This file declares Constants, Enums, Structures and APIs to be used as
+ * Interface between HW Manager and Context.
+ */
+
+/* maximum context numbers */
+#define CAM_CTX_MAX                         8
+
+/* maximum buf done irqs */
+#define CAM_NUM_OUT_PER_COMP_IRQ_MAX        12
+
+/* hardware event callback function type */
+typedef int (*cam_hw_event_cb_func)(void *context, uint32_t evt_id,
+	void *evt_data);
+
+/* hardware page fault callback function type */
+typedef int (*cam_hw_pagefault_cb_func)(void *context, unsigned long iova,
+	uint32_t buf_info);
+
+/**
+ * struct cam_hw_update_entry - Entry for hardware config
+ *
+ * @handle:                Memory handle for the configuration
+ * @offset:                Memory offset
+ * @len:                   Size of the configuration
+ * @flags:                 Flags for the config entry(eg. DMI)
+ * @addr:                  Address of hardware update entry
+ *
+ */
+struct cam_hw_update_entry {
+	int                handle;
+	uint32_t           offset;
+	uint32_t           len;
+	uint32_t           flags;
+	uintptr_t          addr;
+};
+
+/**
+ * struct cam_hw_fence_map_entry - Entry for the resource to sync id map
+ *
+ * @resource_handle:       Resource port id for the buffer
+ * @sync_id:               Sync id
+ *
+ */
+struct cam_hw_fence_map_entry {
+	uint32_t           resource_handle;
+	int32_t            sync_id;
+};
+
+/**
+ * struct cam_hw_done_event_data - Payload for hw done event
+ *
+ * @num_handles:           number of handles in the event
+ * @resource_handle:       List of resource handles
+ * @timestamp:             time stamp
+ * @request_id:            request identifier
+ *
+ */
+struct cam_hw_done_event_data {
+	uint32_t           num_handles;
+	uint32_t           resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	struct timeval     timestamp;
+	uint64_t           request_id;
+};
+
+/**
+ * struct cam_hw_acquire_args - Payload for acquire command
+ *
+ * @context_data:          Context data pointer for the callback function
+ * @event_cb:              Event callback function
+ * @num_acq:               Total number of acquires in the payload
+ * @acquire_info_size:     Size of the acquire info payload
+ * @acquire_info:          Acquired resource array pointer
+ * @ctxt_to_hw_map:        HW context (returned)
+ *
+ */
+struct cam_hw_acquire_args {
+	void                        *context_data;
+	cam_hw_event_cb_func         event_cb;
+	uint32_t                     num_acq;
+	uint32_t                     acquire_info_size;
+	uintptr_t                    acquire_info;
+	void                        *ctxt_to_hw_map;
+};
+
+/**
+ * struct cam_hw_release_args - Payload for release command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @active_req:            Active request flag
+ *
+ */
+struct cam_hw_release_args {
+	void              *ctxt_to_hw_map;
+	bool               active_req;
+};
+
+/**
+ * struct cam_hw_start_args - Payload for start command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of Hardware configuration
+ * @hw_update_entries:     Hardware configuration list
+ *
+ */
+struct cam_hw_start_args {
+	void                        *ctxt_to_hw_map;
+	uint32_t                     num_hw_update_entries;
+	struct cam_hw_update_entry  *hw_update_entries;
+};
+
+/**
+ * struct cam_hw_stop_args - Payload for stop command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @args:                  Arguments to pass for stop
+ *
+ */
+struct cam_hw_stop_args {
+	void              *ctxt_to_hw_map;
+	void              *args;
+};
+
+/**
+ * struct cam_hw_mgr_dump_pf_data - Page fault debug data
+ *
+ * @packet:                Pointer to packet
+ */
+struct cam_hw_mgr_dump_pf_data {
+	void    *packet;
+};
+
+/**
+ * struct cam_hw_prepare_update_args - Payload for prepare command
+ *
+ * @packet:                CSL packet from user mode driver
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @max_hw_update_entries: Maximum hardware update entries supported
+ * @hw_update_entries:     Actual hardware update configuration (returned)
+ * @num_hw_update_entries: Number of actual hardware update entries (returned)
+ * @max_out_map_entries:   Maximum output fence mapping supported
+ * @out_map_entries:       Actual output fence mapping list (returned)
+ * @num_out_map_entries:   Number of actual output fence mapping (returned)
+ * @max_in_map_entries:    Maximum input fence mapping supported
+ * @in_map_entries:        Actual input fence mapping list (returned)
+ * @num_in_map_entries:    Number of actual input fence mapping (returned)
+ * @priv:                  Private pointer of hw update
+ * @pf_data:               Debug data for page fault
+ *
+ */
+struct cam_hw_prepare_update_args {
+	struct cam_packet              *packet;
+	void                           *ctxt_to_hw_map;
+	uint32_t                        max_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	uint32_t                        num_hw_update_entries;
+	uint32_t                        max_out_map_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	uint32_t                        max_in_map_entries;
+	struct cam_hw_fence_map_entry  *in_map_entries;
+	uint32_t                        num_in_map_entries;
+	void                           *priv;
+	struct cam_hw_mgr_dump_pf_data *pf_data;
+};
+
+/**
+ * struct cam_hw_config_args - Payload for config command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_hw_update_entries: Number of hardware update entries
+ * @hw_update_entries:     Hardware update list
+ * @out_map_entries:       Out map info
+ * @num_out_map_entries:   Number of out map entries
+ * @priv:                  Private pointer
+ * @request_id:            Request ID
+ *
+ */
+struct cam_hw_config_args {
+	void                           *ctxt_to_hw_map;
+	uint32_t                        num_hw_update_entries;
+	struct cam_hw_update_entry     *hw_update_entries;
+	struct cam_hw_fence_map_entry  *out_map_entries;
+	uint32_t                        num_out_map_entries;
+	void                           *priv;
+	uint64_t                        request_id;
+	bool                            init_packet;
+};
+
+/**
+ * struct cam_hw_flush_args - Flush arguments
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @num_req_pending:       Number of pending requests to flush
+ * @flush_req_pending:     Pointers to the pending requests to flush
+ * @num_req_active:        Number of active requests to flush
+ * @flush_req_active:      Pointers to the active requests to flush
+ * @flush_type:            The flush type
+ *
+ */
+struct cam_hw_flush_args {
+	void                           *ctxt_to_hw_map;
+	uint32_t                        num_req_pending;
+	void                           *flush_req_pending[20];
+	uint32_t                        num_req_active;
+	void                           *flush_req_active[20];
+	enum flush_type_t               flush_type;
+};
+
+/**
+ * struct cam_hw_dump_pf_args - Payload for dump pf info command
+ *
+ * @pf_data:               Debug data for page fault
+ * @iova:                  Page fault address
+ * @buf_info:              Info about memory buffer where page
+ *                               fault occurred
+ * @mem_found:             If fault memory found in current
+ *                               request
+ *
+ */
+struct cam_hw_dump_pf_args {
+	struct cam_hw_mgr_dump_pf_data  pf_data;
+	unsigned long                   iova;
+	uint32_t                        buf_info;
+	bool                           *mem_found;
+};
+
+/* enum cam_hw_mgr_command - Hardware manager command type */
+enum cam_hw_mgr_command {
+	CAM_HW_MGR_CMD_INTERNAL,
+	CAM_HW_MGR_CMD_DUMP_PF_INFO,
+};
+
+/**
+ * struct cam_hw_cmd_args - Payload for hw manager command
+ *
+ * @ctxt_to_hw_map:        HW context from the acquire
+ * @cmd_type:              HW command type
+ * @internal_args:         Arguments for internal command
+ * @pf_args:               Arguments for dump PF info command
+ *
+ */
+struct cam_hw_cmd_args {
+	void                               *ctxt_to_hw_map;
+	uint32_t                            cmd_type;
+	union {
+		void                       *internal_args;
+		struct cam_hw_dump_pf_args  pf_args;
+	} u;
+};
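+
+/*
+ * Illustrative only (not part of the original change): issuing the dump-PF
+ * command through hw_cmd, mirroring cam_context_dump_pf_info_to_hw() in
+ * cam_context_utils.c.
+ *
+ *	struct cam_hw_cmd_args args = {0};
+ *
+ *	args.ctxt_to_hw_map = ctx->ctxt_to_hw_map;
+ *	args.cmd_type = CAM_HW_MGR_CMD_DUMP_PF_INFO;
+ *	args.u.pf_args.iova = iova;
+ *	hw_mgr_intf->hw_cmd(hw_mgr_intf->hw_mgr_priv, &args);
+ */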
+
+/**
+ * cam_hw_mgr_intf - HW manager interface
+ *
+ * @hw_mgr_priv:           HW manager object
+ * @hw_get_caps:           Function pointer for get hw caps
+ *                               args = cam_query_cap_cmd
+ * @hw_acquire:            Function pointer for acquire hw resources
+ *                               args = cam_hw_acquire_args
+ * @hw_release:            Function pointer for release hw device resource
+ *                               args = cam_hw_release_args
+ * @hw_start:              Function pointer for start hw devices
+ *                               args = cam_hw_start_args
+ * @hw_stop:               Function pointer for stop hw devices
+ *                               args = cam_hw_stop_args
+ * @hw_prepare_update:     Function pointer for prepare hw update for hw devices
+ *                               args = cam_hw_prepare_update_args
+ * @hw_config:             Function pointer for configure hw devices
+ *                               args = cam_hw_config_args
+ * @hw_read:               Function pointer for read hardware registers
+ * @hw_write:              Function pointer for write hardware registers
+ * @hw_cmd:                Function pointer for any customized commands for the
+ *                         hardware manager
+ * @hw_open:               Function pointer for HW init
+ * @hw_close:              Function pointer for HW deinit
+ * @hw_flush:              Function pointer for HW flush
+ *
+ */
+struct cam_hw_mgr_intf {
+	void *hw_mgr_priv;
+
+	int (*hw_get_caps)(void *hw_priv, void *hw_caps_args);
+	int (*hw_acquire)(void *hw_priv, void *hw_acquire_args);
+	int (*hw_release)(void *hw_priv, void *hw_release_args);
+	int (*hw_start)(void *hw_priv, void *hw_start_args);
+	int (*hw_stop)(void *hw_priv, void *hw_stop_args);
+	int (*hw_prepare_update)(void *hw_priv, void *hw_prepare_update_args);
+	int (*hw_config)(void *hw_priv, void *hw_config_args);
+	int (*hw_read)(void *hw_priv, void *read_args);
+	int (*hw_write)(void *hw_priv, void *write_args);
+	int (*hw_cmd)(void *hw_priv, void *hw_cmd_args);
+	int (*hw_open)(void *hw_priv, void *fw_download_args);
+	int (*hw_close)(void *hw_priv, void *hw_close_args);
+	int (*hw_flush)(void *hw_priv, void *hw_flush_args);
+};
+
+#endif /* _CAM_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.c b/drivers/media/platform/msm/camera/cam_core/cam_node.c
new file mode 100644
index 0000000..08a81d2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.c
@@ -0,0 +1,783 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+
+#include "cam_node.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+
+static void cam_node_print_ctx_state(
+	struct cam_node *node)
+{
+	int i;
+	struct cam_context *ctx;
+
+	CAM_INFO(CAM_CORE, "[%s] state=%d, ctx_size %d",
+		node->name, node->state, node->ctx_size);
+
+	mutex_lock(&node->list_mutex);
+	for (i = 0; i < node->ctx_size; i++) {
+		ctx = &node->ctx_list[i];
+
+		spin_lock(&ctx->lock);
+		CAM_INFO(CAM_CORE,
+			"[%s][%d] : state=%d, refcount=%d, active_req_list=%d, pending_req_list=%d, wait_req_list=%d, free_req_list=%d",
+			ctx->dev_name ? ctx->dev_name : "null",
+			i, ctx->state,
+			atomic_read(&(ctx->refcount.refcount.refs)),
+			list_empty(&ctx->active_req_list),
+			list_empty(&ctx->pending_req_list),
+			list_empty(&ctx->wait_req_list),
+			list_empty(&ctx->free_req_list));
+		spin_unlock(&ctx->lock);
+	}
+	mutex_unlock(&node->list_mutex);
+}
+
+static struct cam_context *cam_node_get_ctxt_from_free_list(
+		struct cam_node *node)
+{
+	struct cam_context *ctx = NULL;
+
+	mutex_lock(&node->list_mutex);
+	if (!list_empty(&node->free_ctx_list)) {
+		ctx = list_first_entry(&node->free_ctx_list,
+			struct cam_context, list);
+		list_del_init(&ctx->list);
+	}
+	mutex_unlock(&node->list_mutex);
+	if (ctx)
+		kref_init(&ctx->refcount);
+	return ctx;
+}
+
+void cam_node_put_ctxt_to_free_list(struct kref *ref)
+{
+	struct cam_context *ctx =
+		container_of(ref, struct cam_context, refcount);
+	struct cam_node *node = ctx->node;
+
+	mutex_lock(&node->list_mutex);
+	list_add_tail(&ctx->list, &node->free_ctx_list);
+	mutex_unlock(&node->list_mutex);
+}
+
+static int __cam_node_handle_query_cap(struct cam_node *node,
+	struct cam_query_cap_cmd *query)
+{
+	int rc = -EFAULT;
+
+	if (!query) {
+		CAM_ERR(CAM_CORE, "Invalid params");
+		return -EINVAL;
+	}
+
+	if (node->hw_mgr_intf.hw_get_caps) {
+		rc = node->hw_mgr_intf.hw_get_caps(
+			node->hw_mgr_intf.hw_mgr_priv, query);
+	}
+
+	return rc;
+}
+
+static int __cam_node_handle_acquire_dev(struct cam_node *node,
+	struct cam_acquire_dev_cmd *acquire)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!acquire)
+		return -EINVAL;
+
+	ctx = cam_node_get_ctxt_from_free_list(node);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "No free ctx in free list node %s",
+			node->name);
+		cam_node_print_ctx_state(node);
+
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	rc = cam_context_handle_acquire_dev(ctx, acquire);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+			node->name);
+		goto free_ctx;
+	}
+
+	CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
+		node->name, ctx->ctx_id);
+
+	return 0;
+free_ctx:
+	cam_context_putref(ctx);
+err:
+	return rc;
+}
+
+static int __cam_node_handle_acquire_hw_v1(struct cam_node *node,
+	struct cam_acquire_hw_cmd_v1 *acquire)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!acquire)
+		return -EINVAL;
+
+	if (acquire->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (acquire->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(acquire->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			acquire->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_acquire_hw(ctx, acquire);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "Acquire device failed for node %s",
+			node->name);
+		return rc;
+	}
+
+	CAM_DBG(CAM_CORE, "[%s] Acquire ctx_id %d",
+		node->name, ctx->ctx_id);
+
+	return 0;
+}
+
+static int __cam_node_handle_start_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *start)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!start)
+		return -EINVAL;
+
+	if (start->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (start->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(start->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			start->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_start_dev(ctx, start);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Start failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_stop_dev(struct cam_node *node,
+	struct cam_start_stop_dev_cmd *stop)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!stop)
+		return -EINVAL;
+
+	if (stop->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (stop->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(stop->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			stop->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_stop_dev(ctx, stop);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Stop failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_config_dev(struct cam_node *node,
+	struct cam_config_dev_cmd *config)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!config)
+		return -EINVAL;
+
+	if (config->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (config->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(config->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			config->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_config_dev(ctx, config);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Config failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_flush_dev(struct cam_node *node,
+	struct cam_flush_dev_cmd *flush)
+{
+	struct cam_context *ctx = NULL;
+	int rc;
+
+	if (!flush)
+		return -EINVAL;
+
+	if (flush->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (flush->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(flush->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_handle);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_flush_dev(ctx, flush);
+	if (rc)
+		CAM_ERR(CAM_CORE, "Flush failure for node %s", node->name);
+
+	return rc;
+}
+
+static int __cam_node_handle_release_dev(struct cam_node *node,
+	struct cam_release_dev_cmd *release)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
+			release->dev_handle, node->name);
+		return -EINVAL;
+	}
+
+	if (ctx->state > CAM_CTX_UNINIT && ctx->state < CAM_CTX_STATE_MAX) {
+		rc = cam_context_handle_release_dev(ctx, release);
+		if (rc)
+			CAM_ERR(CAM_CORE, "context release failed for node %s",
+				node->name);
+	} else {
+		CAM_WARN(CAM_CORE,
+			"node %s context id %u state %d invalid to release hdl",
+			node->name, ctx->ctx_id, ctx->state);
+		goto destroy_dev_hdl;
+	}
+
+	cam_context_putref(ctx);
+
+destroy_dev_hdl:
+	rc = cam_destroy_device_hdl(release->dev_handle);
+	if (rc)
+		CAM_ERR(CAM_CORE, "destroy device hdl failed for node %s",
+			node->name);
+	else
+		ctx->dev_hdl = -1;
+
+	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
+		node->name, ctx->ctx_id,
+		atomic_read(&(ctx->refcount.refcount.refs)));
+
+	return rc;
+}
+
+static int __cam_node_handle_release_hw_v1(struct cam_node *node,
+	struct cam_release_hw_cmd_v1 *release)
+{
+	int rc = 0;
+	struct cam_context *ctx = NULL;
+
+	if (!release)
+		return -EINVAL;
+
+	if (release->dev_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid device handle for context");
+		return -EINVAL;
+	}
+
+	if (release->session_handle <= 0) {
+		CAM_ERR(CAM_CORE, "Invalid session handle for context");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *)cam_get_device_priv(release->dev_handle);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d node %s",
+			release->dev_handle, node->name);
+		return -EINVAL;
+	}
+
+	rc = cam_context_handle_release_hw(ctx, release);
+	if (rc)
+		CAM_ERR(CAM_CORE, "context release failed node %s", node->name);
+
+	CAM_DBG(CAM_CORE, "[%s] Release ctx_id=%d, refcount=%d",
+		node->name, ctx->ctx_id,
+		atomic_read(&(ctx->refcount.refcount.refs)));
+
+	return rc;
+}
+
+static int __cam_node_crm_get_dev_info(struct cam_req_mgr_device_info *info)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(info->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context  for handle %d",
+			info->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_crm_get_dev_info(ctx, info);
+}
+
+static int __cam_node_crm_link_setup(
+	struct cam_req_mgr_core_dev_link_setup *setup)
+{
+	int rc;
+	struct cam_context *ctx = NULL;
+
+	if (!setup)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(setup->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			setup->dev_hdl);
+		return -EINVAL;
+	}
+
+	if (setup->link_enable)
+		rc = cam_context_handle_crm_link(ctx, setup);
+	else
+		rc = cam_context_handle_crm_unlink(ctx, setup);
+
+	return rc;
+}
+
+static int __cam_node_crm_apply_req(struct cam_req_mgr_apply_request *apply)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	ctx = (struct cam_context *) cam_get_device_priv(apply->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			apply->dev_hdl);
+		return -EINVAL;
+	}
+
+	trace_cam_apply_req("Node", apply->request_id);
+
+	return cam_context_handle_crm_apply_req(ctx, apply);
+}
+
+static int __cam_node_crm_flush_req(struct cam_req_mgr_flush_request *flush)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!flush) {
+		CAM_ERR(CAM_CORE, "Invalid flush request payload");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(flush->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			flush->dev_hdl);
+		return -EINVAL;
+	}
+
+	return cam_context_handle_crm_flush_req(ctx, flush);
+}
+
+static int __cam_node_crm_process_evt(
+	struct cam_req_mgr_link_evt_data *evt_data)
+{
+	struct cam_context *ctx = NULL;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_CORE, "Invalid process event request payload");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_context *) cam_get_device_priv(evt_data->dev_hdl);
+	if (!ctx) {
+		CAM_ERR(CAM_CORE, "Can not get context for handle %d",
+			evt_data->dev_hdl);
+		return -EINVAL;
+	}
+	return cam_context_handle_crm_process_evt(ctx, evt_data);
+}
+
+int cam_node_deinit(struct cam_node *node)
+{
+	if (node)
+		memset(node, 0, sizeof(*node));
+
+	CAM_DBG(CAM_CORE, "deinit complete");
+
+	return 0;
+}
+
+int cam_node_shutdown(struct cam_node *node)
+{
+	int i = 0;
+	int rc = 0;
+
+	if (!node)
+		return -EINVAL;
+
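+	/* Best-effort: shut down every active context even if one fails. */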
+	for (i = 0; i < node->ctx_size; i++) {
+		if (node->ctx_list[i].dev_hdl > 0) {
+			CAM_DBG(CAM_CORE,
+				"Node [%s] invoking shutdown on context [%d]",
+				node->name, i);
+			rc = cam_context_shutdown(&(node->ctx_list[i]));
+		}
+	}
+
+	if (node->hw_mgr_intf.hw_close)
+		node->hw_mgr_intf.hw_close(node->hw_mgr_intf.hw_mgr_priv,
+			NULL);
+
+	return 0;
+}
+
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name)
+{
+	int rc = 0;
+	int i;
+
+	if (!node || !hw_mgr_intf ||
+		sizeof(node->hw_mgr_intf) != sizeof(*hw_mgr_intf)) {
+		return -EINVAL;
+	}
+
+	memset(node, 0, sizeof(*node));
+
+	strlcpy(node->name, name, sizeof(node->name));
+
+	memcpy(&node->hw_mgr_intf, hw_mgr_intf, sizeof(node->hw_mgr_intf));
+	node->crm_node_intf.apply_req = __cam_node_crm_apply_req;
+	node->crm_node_intf.get_dev_info = __cam_node_crm_get_dev_info;
+	node->crm_node_intf.link_setup = __cam_node_crm_link_setup;
+	node->crm_node_intf.flush_req = __cam_node_crm_flush_req;
+	node->crm_node_intf.process_evt = __cam_node_crm_process_evt;
+
+	mutex_init(&node->list_mutex);
+	INIT_LIST_HEAD(&node->free_ctx_list);
+	node->ctx_list = ctx_list;
+	node->ctx_size = ctx_size;
+	for (i = 0; i < ctx_size; i++) {
+		if (!ctx_list[i].state_machine) {
+			CAM_ERR(CAM_CORE,
+				"camera context %d is not initialized", i);
+			rc = -EINVAL;
+			goto err;
+		}
+		INIT_LIST_HEAD(&ctx_list[i].list);
+		list_add_tail(&ctx_list[i].list, &node->free_ctx_list);
+		ctx_list[i].node = node;
+	}
+
+	node->state = CAM_NODE_STATE_INIT;
+err:
+	CAM_DBG(CAM_CORE, "Exit. (rc = %d)", rc);
+	return rc;
+}
+
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	CAM_DBG(CAM_CORE, "handle cmd %d", cmd->op_code);
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_query_cap_cmd query;
+
+		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
+			sizeof(query))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = __cam_node_handle_query_cap(node, &query);
+		if (rc) {
+			CAM_ERR(CAM_CORE, "querycap is failed(rc = %d)",
+				rc);
+			break;
+		}
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
+			sizeof(query)))
+			rc = -EFAULT;
+
+		break;
+	}
+	case CAM_ACQUIRE_DEV: {
+		struct cam_acquire_dev_cmd acquire;
+
+		if (copy_from_user(&acquire, u64_to_user_ptr(cmd->handle),
+			sizeof(acquire))) {
+			rc = -EFAULT;
+			break;
+		}
+		rc = __cam_node_handle_acquire_dev(node, &acquire);
+		if (rc) {
+			CAM_ERR(CAM_CORE, "acquire device failed(rc = %d)",
+				rc);
+			break;
+		}
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &acquire,
+			sizeof(acquire)))
+			rc = -EFAULT;
+		break;
+	}
+	case CAM_ACQUIRE_HW: {
+		uint32_t api_version;
+		void *acquire_ptr = NULL;
+		size_t acquire_size;
+
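+		/*
+		 * The acquire payload is versioned: peek at the leading
+		 * api_version field, size the buffer for that version, then
+		 * copy the full structure from user space.
+		 */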
+		if (copy_from_user(&api_version, (void __user *)cmd->handle,
+			sizeof(api_version))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		if (api_version == 1) {
+			acquire_size = sizeof(struct cam_acquire_hw_cmd_v1);
+		} else {
+			CAM_ERR(CAM_CORE, "Unsupported api version %d",
+				api_version);
+			rc = -EINVAL;
+			break;
+		}
+
+		acquire_ptr = kzalloc(acquire_size, GFP_KERNEL);
+		if (!acquire_ptr) {
+			CAM_ERR(CAM_CORE, "No memory for acquire HW");
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(acquire_ptr, (void __user *)cmd->handle,
+			acquire_size)) {
+			rc = -EFAULT;
+			goto acquire_kfree;
+		}
+
+		if (api_version == 1) {
+			rc = __cam_node_handle_acquire_hw_v1(node, acquire_ptr);
+			if (rc) {
+				CAM_ERR(CAM_CORE,
+					"acquire device failed(rc = %d)", rc);
+				goto acquire_kfree;
+			}
+		}
+
+		if (copy_to_user((void __user *)cmd->handle, acquire_ptr,
+			acquire_size))
+			rc = -EFAULT;
+
+acquire_kfree:
+		kfree(acquire_ptr);
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_start_stop_dev_cmd start;
+
+		if (copy_from_user(&start, u64_to_user_ptr(cmd->handle),
+			sizeof(start)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_start_dev(node, &start);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"start device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_STOP_DEV: {
+		struct cam_start_stop_dev_cmd stop;
+
+		if (copy_from_user(&stop, u64_to_user_ptr(cmd->handle),
+			sizeof(stop)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_stop_dev(node, &stop);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"stop device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+			sizeof(config)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_config_dev(node, &config);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"config device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (copy_from_user(&release, u64_to_user_ptr(cmd->handle),
+			sizeof(release)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_release_dev(node, &release);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"release device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	case CAM_RELEASE_HW: {
+		uint32_t api_version;
+		size_t release_size;
+		void *release_ptr = NULL;
+
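+		/* Versioned payload, handled like CAM_ACQUIRE_HW above. */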
+		if (copy_from_user(&api_version, (void __user *)cmd->handle,
+			sizeof(api_version))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		if (api_version == 1) {
+			release_size = sizeof(struct cam_release_hw_cmd_v1);
+		} else {
+			CAM_ERR(CAM_CORE, "Unsupported api version %d",
+				api_version);
+			rc = -EINVAL;
+			break;
+		}
+
+		release_ptr = kzalloc(release_size, GFP_KERNEL);
+		if (!release_ptr) {
+			CAM_ERR(CAM_CORE, "No memory for release HW");
+			rc = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(release_ptr, (void __user *)cmd->handle,
+			release_size)) {
+			rc = -EFAULT;
+			goto release_kfree;
+		}
+
+		if (api_version == 1) {
+			rc = __cam_node_handle_release_hw_v1(node, release_ptr);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"release device failed(rc = %d)", rc);
+		}
+
+release_kfree:
+		kfree(release_ptr);
+		break;
+	}
+	case CAM_FLUSH_REQ: {
+		struct cam_flush_dev_cmd flush;
+
+		if (copy_from_user(&flush, u64_to_user_ptr(cmd->handle),
+			sizeof(flush)))
+			rc = -EFAULT;
+		else {
+			rc = __cam_node_handle_flush_dev(node, &flush);
+			if (rc)
+				CAM_ERR(CAM_CORE,
+					"flush device failed(rc = %d)", rc);
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CORE, "Unknown op code %d", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_node.h b/drivers/media/platform/msm/camera/cam_core/cam_node.h
new file mode 100644
index 0000000..ea0f838b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_node.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_NODE_H_
+#define _CAM_NODE_H_
+
+#include <linux/kref.h>
+#include "cam_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+#define CAM_NODE_NAME_LENGTH_MAX        256
+
+#define CAM_NODE_STATE_UNINIT           0
+#define CAM_NODE_STATE_INIT             1
+
+/**
+ * struct cam_node - Singleton Node for camera HW devices
+ *
+ * @name:                  Name for struct cam_node
+ * @state:                 Node state:
+ *                            0 = uninitialized, 1 = initialized
+ * @list_mutex:            Mutex for the context pool
+ * @free_ctx_list:         Free context pool list
+ * @ctx_list:              Context list
+ * @ctx_size:              Context list size
+ * @hw_mgr_intf:           Interface for cam_node to HW
+ * @crm_node_intf:         Interface for the CRM to cam_node
+ *
+ */
+struct cam_node {
+	char                         name[CAM_NODE_NAME_LENGTH_MAX];
+	uint32_t                     state;
+
+	/* context pool */
+	struct mutex                 list_mutex;
+	struct list_head             free_ctx_list;
+	struct cam_context          *ctx_list;
+	uint32_t                     ctx_size;
+
+	/* interfaces */
+	struct cam_hw_mgr_intf       hw_mgr_intf;
+	struct cam_req_mgr_kmd_ops   crm_node_intf;
+};
+
+/**
+ * cam_node_handle_ioctl()
+ *
+ * @brief:       Handle ioctl commands
+ *
+ * @node:                  Node handle
+ * @cmd:                   IOCTL command
+ *
+ */
+int cam_node_handle_ioctl(struct cam_node *node, struct cam_control *cmd);
+
+/**
+ * cam_node_deinit()
+ *
+ * @brief:       Deinitialization function for the Node interface
+ *
+ * @node:                  Node handle
+ *
+ */
+int cam_node_deinit(struct cam_node *node);
+
+/**
+ * cam_node_shutdown()
+ *
+ * @brief:       Shuts down/closes the cam node.
+ *
+ * @node:                  Cam_node pointer
+ *
+ */
+int cam_node_shutdown(struct cam_node *node);
+
+/**
+ * cam_node_init()
+ *
+ * @brief:       Initialization function for the Node interface.
+ *
+ * @node:                  Cam_node pointer
+ * @hw_mgr_intf:           HW manager interface blob
+ * @ctx_list:              List of cam_contexts to be added
+ * @ctx_size:              Number of contexts in @ctx_list
+ * @name:                  Name for the node
+ *
+ */
+int cam_node_init(struct cam_node *node, struct cam_hw_mgr_intf *hw_mgr_intf,
+	struct cam_context *ctx_list, uint32_t ctx_size, char *name);
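+
+/*
+ * Illustrative call sequence (a sketch; pool size and names are
+ * hypothetical). Every context in @ctx_list must already have its state
+ * machine initialized, or cam_node_init() fails.
+ *
+ *	static struct cam_context ctx_pool[8];
+ *	...
+ *	rc = cam_node_init(node, &hw_mgr_intf, ctx_pool,
+ *		ARRAY_SIZE(ctx_pool), "example-node");
+ */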
+
+/**
+ * cam_node_put_ctxt_to_free_list()
+ *
+ * @brief:       Put context in node free list.
+ *
+ * @ref:         Context's kref object
+ *
+ */
+void cam_node_put_ctxt_to_free_list(struct kref *ref);
+
+#endif /* _CAM_NODE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_core/cam_subdev.c b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
new file mode 100644
index 0000000..be1dc42
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_core/cam_subdev.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_subdev_subscribe_event()
+ *
+ * @brief: function to subscribe to v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_subscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_SUBDEVICE_EVENT_MAX, NULL);
+}
+
+/**
+ * cam_subdev_unsubscribe_event()
+ *
+ * @brief: function to unsubscribe from v4l2 events
+ *
+ * @sd:                    Pointer to struct v4l2_subdev.
+ * @fh:                    Pointer to struct v4l2_fh.
+ * @sub:                   Pointer to struct v4l2_event_subscription.
+ */
+static int cam_subdev_unsubscribe_event(struct v4l2_subdev *sd,
+	struct v4l2_fh *fh,
+	struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static long cam_subdev_ioctl(struct v4l2_subdev *sd, unsigned int cmd,
+	void *arg)
+{
+	long rc;
+	struct cam_node *node =
+		(struct cam_node *) v4l2_get_subdevdata(sd);
+
+	if (!node || node->state == CAM_NODE_STATE_UNINIT) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_node_handle_ioctl(node,
+			(struct cam_control *) arg);
+		break;
+	default:
+		CAM_ERR(CAM_CORE, "Invalid command %d for %s", cmd,
+			node->name);
+		rc = -EINVAL;
+	}
+end:
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int rc;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CORE, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+	rc = cam_subdev_ioctl(sd, cmd, &cmd_data);
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_CORE,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+const struct v4l2_subdev_core_ops cam_subdev_core_ops = {
+	.ioctl = cam_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_subdev_compat_ioctl,
+#endif
+	.subscribe_event = cam_subdev_subscribe_event,
+	.unsubscribe_event = cam_subdev_unsubscribe_event,
+};
+
+static const struct v4l2_subdev_ops cam_subdev_ops = {
+	.core = &cam_subdev_core_ops,
+};
+
+int cam_subdev_remove(struct cam_subdev *sd)
+{
+	if (!sd)
+		return -EINVAL;
+
+	cam_unregister_subdev(sd);
+	cam_node_deinit((struct cam_node *)sd->token);
+	kfree(sd->token);
+
+	return 0;
+}
+
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type)
+{
+	int rc;
+	struct cam_node *node = NULL;
+
+	if (!sd || !pdev || !name)
+		return -EINVAL;
+
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
+		return -ENOMEM;
+
+	/* Setup camera v4l2 subdevice */
+	sd->pdev = pdev;
+	sd->name = name;
+	sd->ops = &cam_subdev_ops;
+	sd->token = node;
+	sd->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	sd->ent_function = dev_type;
+
+	rc = cam_register_subdev(sd);
+	if (rc) {
+		CAM_ERR(CAM_CORE, "cam_register_subdev() failed for dev: %s",
+			sd->name);
+		goto err;
+	}
+	platform_set_drvdata(sd->pdev, sd);
+	return rc;
+err:
+	kfree(node);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/Makefile b/drivers/media/platform/msm/camera/cam_cpas/Makefile
new file mode 100644
index 0000000..062e9ba
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/cpas_top
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/camss_top
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cpas_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += camss_top/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpas_soc.o cam_cpas_intf.o cam_cpas_hw.o
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
new file mode 100644
index 0000000..0841f36
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.c
@@ -0,0 +1,1703 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include "cam_cpas_hw.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_soc.h"
+
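+/*
+ * Optional floor for CAMNOC IB votes, in multiples of 1000000 bytes/s;
+ * 0 keeps the built-in CAM_CPAS_AXI_MIN_CAMNOC_IB_BW.
+ */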
+static uint cam_min_camnoc_ib_bw;
+module_param(cam_min_camnoc_ib_bw, uint, 0644);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	uint32_t value;
+	int reg_base_index;
+
+	if (reg_info->enable == false)
+		return 0;
+
+	reg_base_index = cpas_core->regbase_index[reg_base];
+	if (reg_base_index == -1)
+		return -EINVAL;
+
+	if (reg_info->masked_value) {
+		value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base +
+			reg_info->offset);
+		value = value & (~reg_info->mask);
+		value = value | (reg_info->value << reg_info->shift);
+	} else {
+		value = reg_info->value;
+	}
+
+	CAM_DBG(CAM_CPAS, "Base[%d] Offset[0x%8x] Value[0x%8x]",
+		reg_base, reg_info->offset, value);
+
+	cam_io_w_mb(value, soc_info->reg_map[reg_base_index].mem_base +
+		reg_info->offset);
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_level(
+	struct cam_cpas_bus_client *bus_client, unsigned int level)
+{
+	if (!bus_client->valid || (bus_client->dyn_vote == true)) {
+		CAM_ERR(CAM_CPAS, "Invalid params %d %d", bus_client->valid,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	if (level >= bus_client->num_usecases) {
+		CAM_ERR(CAM_CPAS, "Invalid vote level=%d, usecases=%d", level,
+			bus_client->num_usecases);
+		return -EINVAL;
+	}
+
+	if (level == bus_client->curr_vote_level)
+		return 0;
+
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] index[%d]",
+		bus_client->client_id, bus_client->name, level);
+	msm_bus_scale_client_update_request(bus_client->client_id, level);
+	bus_client->curr_vote_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_vote_bus_client_bw(
+	struct cam_cpas_bus_client *bus_client, uint64_t ab, uint64_t ib,
+	bool camnoc_bw)
+{
+	struct msm_bus_paths *path;
+	struct msm_bus_scale_pdata *pdata;
+	int idx = 0;
+	uint64_t min_camnoc_ib_bw = CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+
+	if (cam_min_camnoc_ib_bw > 0)
+		min_camnoc_ib_bw = (uint64_t)cam_min_camnoc_ib_bw * 1000000L;
+
+	CAM_DBG(CAM_CPAS, "cam_min_camnoc_ib_bw = %d, min_camnoc_ib_bw=%llu",
+		cam_min_camnoc_ib_bw, min_camnoc_ib_bw);
+
+	if (!bus_client->valid) {
+		CAM_ERR(CAM_CPAS, "bus client not valid");
+		return -EINVAL;
+	}
+
+	if ((bus_client->num_usecases != 2) ||
+		(bus_client->num_paths != 1) ||
+		(bus_client->dyn_vote != true)) {
+		CAM_ERR(CAM_CPAS, "dynamic update not allowed %d %d %d",
+			bus_client->num_usecases, bus_client->num_paths,
+			bus_client->dyn_vote);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bus_client->lock);
+
+	if (bus_client->curr_vote_level > 1) {
+		CAM_ERR(CAM_CPAS, "curr_vote_level %d cannot be greater than 1",
+			bus_client->curr_vote_level);
+		mutex_unlock(&bus_client->lock);
+		return -EINVAL;
+	}
+
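+	/*
+	 * Dynamic-vote clients have exactly two usecases; flip between them
+	 * so the bus driver always sees an index change and re-reads the
+	 * updated ab/ib vectors.
+	 */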
+	idx = bus_client->curr_vote_level;
+	idx = 1 - idx;
+	bus_client->curr_vote_level = idx;
+	mutex_unlock(&bus_client->lock);
+
+	if (camnoc_bw == true) {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_CAMNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_CAMNOC_AB_BW;
+
+		if ((ib > 0) && (ib < min_camnoc_ib_bw))
+			ib = min_camnoc_ib_bw;
+	} else {
+		if ((ab > 0) && (ab < CAM_CPAS_AXI_MIN_MNOC_AB_BW))
+			ab = CAM_CPAS_AXI_MIN_MNOC_AB_BW;
+
+		if ((ib > 0) && (ib < CAM_CPAS_AXI_MIN_MNOC_IB_BW))
+			ib = CAM_CPAS_AXI_MIN_MNOC_IB_BW;
+	}
+
+	pdata = bus_client->pdata;
+	path = &(pdata->usecase[idx]);
+	path->vectors[0].ab = ab;
+	path->vectors[0].ib = ib;
+
+	CAM_DBG(CAM_CPAS, "Bus client=[%d][%s] :ab[%llu] ib[%llu], index[%d]",
+		bus_client->client_id, bus_client->name, ab, ib, idx);
+	msm_bus_scale_client_update_request(bus_client->client_id, idx);
+
+	return 0;
+}
+
+static int cam_cpas_util_register_bus_client(
+	struct cam_hw_soc_info *soc_info, struct device_node *dev_node,
+	struct cam_cpas_bus_client *bus_client)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	uint32_t client_id;
+	int rc;
+
+	pdata = msm_bus_pdata_from_node(soc_info->pdev,
+		dev_node);
+	if (!pdata) {
+		CAM_ERR(CAM_CPAS, "failed get_pdata");
+		return -EINVAL;
+	}
+
+	if ((pdata->num_usecases == 0) ||
+		(pdata->usecase[0].num_paths == 0)) {
+		CAM_ERR(CAM_CPAS, "usecase=%d", pdata->num_usecases);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	client_id = msm_bus_scale_register_client(pdata);
+	if (!client_id) {
+		CAM_ERR(CAM_CPAS, "failed in register ahb bus client");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	bus_client->dyn_vote = of_property_read_bool(dev_node,
+		"qcom,msm-bus-vector-dyn-vote");
+
+	if (bus_client->dyn_vote && (pdata->num_usecases != 2)) {
+		CAM_ERR(CAM_CPAS, "Excess or less vectors %d",
+			pdata->num_usecases);
+		rc = -EINVAL;
+		goto fail_unregister_client;
+	}
+
+	msm_bus_scale_client_update_request(client_id, 0);
+
+	bus_client->src = pdata->usecase[0].vectors[0].src;
+	bus_client->dst = pdata->usecase[0].vectors[0].dst;
+	bus_client->pdata = pdata;
+	bus_client->client_id = client_id;
+	bus_client->num_usecases = pdata->num_usecases;
+	bus_client->num_paths = pdata->usecase[0].num_paths;
+	bus_client->curr_vote_level = 0;
+	bus_client->valid = true;
+	bus_client->name = pdata->name;
+	mutex_init(&bus_client->lock);
+
+	CAM_DBG(CAM_CPAS, "Bus Client=[%d][%s] : src=%d, dst=%d",
+		bus_client->client_id, bus_client->name,
+		bus_client->src, bus_client->dst);
+
+	return 0;
+fail_unregister_client:
+	msm_bus_scale_unregister_client(bus_client->client_id);
+error:
+	return rc;
+
+}
+
+static int cam_cpas_util_unregister_bus_client(
+	struct cam_cpas_bus_client *bus_client)
+{
+	if (!bus_client->valid)
+		return -EINVAL;
+
+	if (bus_client->dyn_vote)
+		cam_cpas_util_vote_bus_client_bw(bus_client, 0, 0, false);
+	else
+		cam_cpas_util_vote_bus_client_level(bus_client, 0);
+
+	msm_bus_scale_unregister_client(bus_client->client_id);
+	bus_client->valid = false;
+
+	mutex_destroy(&bus_client->lock);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_cleanup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		cam_cpas_util_unregister_bus_client(&curr_port->mnoc_bus);
+		of_node_put(curr_port->axi_port_mnoc_node);
+		if (soc_private->axi_camnoc_based) {
+			cam_cpas_util_unregister_bus_client(
+				&curr_port->camnoc_bus);
+			of_node_put(curr_port->axi_port_camnoc_node);
+		}
+		of_node_put(curr_port->axi_port_node);
+		list_del(&curr_port->sibling_port);
+		mutex_destroy(&curr_port->lock);
+		kfree(curr_port);
+	}
+
+	of_node_put(soc_private->axi_port_list_node);
+
+	return 0;
+}
+
+static int cam_cpas_util_axi_setup(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *)soc_info->soc_private;
+	struct cam_cpas_axi_port *axi_port;
+	int rc;
+	struct device_node *axi_port_list_node;
+	struct device_node *axi_port_node = NULL;
+	struct device_node *axi_port_mnoc_node = NULL;
+	struct device_node *axi_port_camnoc_node = NULL;
+
+	INIT_LIST_HEAD(&cpas_core->axi_ports_list_head);
+
+	axi_port_list_node = of_find_node_by_name(soc_info->pdev->dev.of_node,
+		"qcom,axi-port-list");
+	if (!axi_port_list_node) {
+		CAM_ERR(CAM_CPAS, "Node qcom,axi-port-list not found.");
+		return -EINVAL;
+	}
+
+	soc_private->axi_port_list_node = axi_port_list_node;
+
+	for_each_available_child_of_node(axi_port_list_node, axi_port_node) {
+		axi_port = kzalloc(sizeof(*axi_port), GFP_KERNEL);
+		if (!axi_port) {
+			rc = -ENOMEM;
+			goto error_previous_axi_cleanup;
+		}
+		axi_port->axi_port_node = axi_port_node;
+
+		rc = of_property_read_string_index(axi_port_node,
+			"qcom,axi-port-name", 0,
+			(const char **)&axi_port->axi_port_name);
+		if (rc) {
+			CAM_ERR(CAM_CPAS,
+				"failed to read qcom,axi-port-name rc=%d", rc);
+			goto port_name_fail;
+		}
+
+		axi_port_mnoc_node = of_find_node_by_name(axi_port_node,
+			"qcom,axi-port-mnoc");
+		if (!axi_port_mnoc_node) {
+			CAM_ERR(CAM_CPAS, "Node qcom,axi-port-mnoc not found.");
+			rc = -EINVAL;
+			goto mnoc_node_get_fail;
+		}
+		axi_port->axi_port_mnoc_node = axi_port_mnoc_node;
+
+		rc = cam_cpas_util_register_bus_client(soc_info,
+			axi_port_mnoc_node, &axi_port->mnoc_bus);
+		if (rc)
+			goto mnoc_register_fail;
+
+		if (soc_private->axi_camnoc_based) {
+			axi_port_camnoc_node = of_find_node_by_name(
+				axi_port_node, "qcom,axi-port-camnoc");
+			if (!axi_port_camnoc_node) {
+				CAM_ERR(CAM_CPAS,
+					"Node qcom,axi-port-camnoc not found");
+				rc = -EINVAL;
+				goto camnoc_node_get_fail;
+			}
+			axi_port->axi_port_camnoc_node = axi_port_camnoc_node;
+
+			rc = cam_cpas_util_register_bus_client(soc_info,
+				axi_port_camnoc_node, &axi_port->camnoc_bus);
+			if (rc)
+				goto camnoc_register_fail;
+		}
+
+		mutex_init(&axi_port->lock);
+
+		INIT_LIST_HEAD(&axi_port->sibling_port);
+		list_add_tail(&axi_port->sibling_port,
+			&cpas_core->axi_ports_list_head);
+		INIT_LIST_HEAD(&axi_port->clients_list_head);
+	}
+
+	return 0;
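+/*
+ * Unwind in reverse acquisition order for the port that failed, then let
+ * cam_cpas_util_axi_cleanup() tear down ports from earlier iterations.
+ */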
+camnoc_register_fail:
+	of_node_put(axi_port->axi_port_camnoc_node);
+camnoc_node_get_fail:
+	cam_cpas_util_unregister_bus_client(&axi_port->mnoc_bus);
+mnoc_register_fail:
+	of_node_put(axi_port->axi_port_mnoc_node);
+mnoc_node_get_fail:
+port_name_fail:
+	of_node_put(axi_port->axi_port_node);
+	kfree(axi_port);
+error_previous_axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, soc_info);
+	return rc;
+}
+
+static int cam_cpas_util_vote_default_ahb_axi(struct cam_hw_info *cpas_hw,
+	int enable)
+{
+	int rc;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+	uint64_t camnoc_bw, mnoc_bw;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	rc = cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		(enable == true) ? CAM_SVS_VOTE : CAM_SUSPEND_VOTE);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Failed in AHB vote, enable=%d, rc=%d",
+			enable, rc);
+		return rc;
+	}
+
+	if (enable) {
+		mnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		camnoc_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	} else {
+		mnoc_bw = 0;
+		camnoc_bw = 0;
+	}
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		rc = cam_cpas_util_vote_bus_client_bw(&curr_port->mnoc_bus,
+			mnoc_bw, mnoc_bw, false);
+		if (rc) {
+			CAM_ERR(CAM_CPAS,
+				"Failed in mnoc vote, enable=%d, rc=%d",
+				enable, rc);
+			goto remove_ahb_vote;
+		}
+
+		if (soc_private->axi_camnoc_based) {
+			rc = cam_cpas_util_vote_bus_client_bw(
+				&curr_port->camnoc_bus, 0, camnoc_bw, true);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"Failed in camnoc vote, enable=%d, %d",
+					enable, rc);
+				cam_cpas_util_vote_bus_client_bw(
+					&curr_port->mnoc_bus, 0, 0, false);
+				goto remove_ahb_vote;
+			}
+		}
+	}
+
+	return 0;
+remove_ahb_vote:
+	cam_cpas_util_vote_bus_client_level(&cpas_core->ahb_bus_client,
+		CAM_SUSPEND_VOTE);
+	return rc;
+}
+
+static int cam_cpas_util_insert_client_to_axi_port(struct cam_cpas *cpas_core,
+	struct cam_cpas_private_soc *soc_private,
+	struct cam_cpas_client *cpas_client, int32_t client_indx)
+{
+	struct cam_cpas_axi_port *curr_port;
+	struct cam_cpas_axi_port *temp_port;
+
+	list_for_each_entry_safe(curr_port, temp_port,
+		&cpas_core->axi_ports_list_head, sibling_port) {
+		if (strnstr(curr_port->axi_port_name,
+			soc_private->client_axi_port_name[client_indx],
+			strlen(curr_port->axi_port_name))) {
+
+			cpas_client->axi_port = curr_port;
+			INIT_LIST_HEAD(&cpas_client->axi_sibling_client);
+
+			mutex_lock(&curr_port->lock);
+			list_add_tail(&cpas_client->axi_sibling_client,
+				&cpas_client->axi_port->clients_list_head);
+			mutex_unlock(&curr_port->lock);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void cam_cpas_util_remove_client_from_axi_port(
+	struct cam_cpas_client *cpas_client)
+{
+	mutex_lock(&cpas_client->axi_port->lock);
+	list_del(&cpas_client->axi_sibling_client);
+	mutex_unlock(&cpas_client->axi_port->lock);
+}
+
+static int cam_cpas_hw_reg_write(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		CAM_ERR(CAM_CPAS,
+			"Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		cam_io_w_mb(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		cam_io_w(value,
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_hw_reg_read(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, enum cam_cpas_reg_base reg_base,
+	uint32_t offset, bool mb, uint32_t *value)
+{
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	int reg_base_index = cpas_core->regbase_index[reg_base];
+	uint32_t reg_value;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!value)
+		return -EINVAL;
+
+	if (reg_base_index < 0 || reg_base_index >= soc_info->num_reg_map) {
+		CAM_ERR(CAM_CPAS,
+			"Invalid reg_base=%d, reg_base_index=%d, num_map=%d",
+			reg_base, reg_base_index, soc_info->num_reg_map);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	if (mb)
+		reg_value = cam_io_r_mb(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+	else
+		reg_value = cam_io_r(
+			soc_info->reg_map[reg_base_index].mem_base + offset);
+
+	*value = reg_value;
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	return rc;
+}
+
+static int cam_cpas_util_set_camnoc_axi_clk_rate(
+	struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	struct cam_axi_vote consolidated_axi_vote;
+	int rc = 0;
+
+	CAM_DBG(CAM_CPAS, "control_camnoc_axi_clk=%d",
+		soc_private->control_camnoc_axi_clk);
+
+	if (soc_private->control_camnoc_axi_clk) {
+		struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+		struct cam_cpas_axi_port *curr_axi_port = NULL;
+		struct cam_cpas_axi_port *temp_axi_port = NULL;
+		uint64_t required_camnoc_bw = 0;
+		int32_t clk_rate = 0;
+
+		list_for_each_entry_safe(curr_axi_port, temp_axi_port,
+			&cpas_core->axi_ports_list_head, sibling_port) {
+			consolidated_axi_vote =
+				curr_axi_port->consolidated_axi_vote;
+
+			if (consolidated_axi_vote.uncompressed_bw
+				> required_camnoc_bw)
+				required_camnoc_bw =
+					consolidated_axi_vote.uncompressed_bw;
+
+			CAM_DBG(CAM_CPAS, "[%s] : curr=%llu, overal=%llu",
+				curr_axi_port->axi_port_name,
+				consolidated_axi_vote.uncompressed_bw,
+				required_camnoc_bw);
+		}
+
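+		/*
+		 * Pad the largest per-port uncompressed BW by the configured
+		 * margin, clamp to the minimum IB BW, then derive the clock
+		 * rate as BW divided by the bus width in bytes.
+		 */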
+		required_camnoc_bw += (required_camnoc_bw *
+			soc_private->camnoc_axi_clk_bw_margin) / 100;
+
+		if ((required_camnoc_bw > 0) &&
+			(required_camnoc_bw <
+			soc_private->camnoc_axi_min_ib_bw))
+			required_camnoc_bw = soc_private->camnoc_axi_min_ib_bw;
+
+		clk_rate = required_camnoc_bw / soc_private->camnoc_bus_width;
+
+		CAM_DBG(CAM_CPAS, "Setting camnoc axi clk rate : %llu %d",
+			required_camnoc_bw, clk_rate);
+
+		rc = cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+		if (rc)
+			CAM_ERR(CAM_CPAS,
+				"Failed in setting camnoc axi clk %llu %d %d",
+				required_camnoc_bw, clk_rate, rc);
+	}
+
+	return rc;
+}
+
+static int cam_cpas_util_apply_client_axi_vote(
+	struct cam_hw_info *cpas_hw,
+	struct cam_cpas_client *cpas_client,
+	struct cam_axi_vote *axi_vote)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	struct cam_cpas_client *curr_client;
+	struct cam_cpas_client *temp_client;
+	struct cam_axi_vote req_axi_vote = *axi_vote;
+	struct cam_cpas_axi_port *axi_port = cpas_client->axi_port;
+	uint64_t camnoc_bw = 0, mnoc_bw = 0;
+	int rc = 0;
+
+	if (!axi_port) {
+		CAM_ERR(CAM_CPAS, "axi port does not exists");
+		return -EINVAL;
+	}
+
+	/*
+	 * Make sure we use same bw for both compressed, uncompressed
+	 * in case client has requested either of one only
+	 */
+	if (req_axi_vote.compressed_bw == 0)
+		req_axi_vote.compressed_bw = req_axi_vote.uncompressed_bw;
+
+	if (req_axi_vote.uncompressed_bw == 0)
+		req_axi_vote.uncompressed_bw = req_axi_vote.compressed_bw;
+
+	if ((cpas_client->axi_vote.compressed_bw ==
+		req_axi_vote.compressed_bw) &&
+		(cpas_client->axi_vote.uncompressed_bw ==
+		req_axi_vote.uncompressed_bw))
+		return 0;
+
+	mutex_lock(&axi_port->lock);
+	cpas_client->axi_vote = req_axi_vote;
+
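+	/* A port's consolidated vote is the sum over all clients on it. */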
+	list_for_each_entry_safe(curr_client, temp_client,
+		&axi_port->clients_list_head, axi_sibling_client) {
+		camnoc_bw += curr_client->axi_vote.uncompressed_bw;
+		mnoc_bw += curr_client->axi_vote.compressed_bw;
+	}
+
+	if ((!soc_private->axi_camnoc_based) && (mnoc_bw < camnoc_bw))
+		mnoc_bw = camnoc_bw;
+
+	axi_port->consolidated_axi_vote.compressed_bw = mnoc_bw;
+	axi_port->consolidated_axi_vote.uncompressed_bw = camnoc_bw;
+
+	CAM_DBG(CAM_CPAS,
+		"axi[(%d, %d),(%d, %d)] : camnoc_bw[%llu], mnoc_bw[%llu]",
+		axi_port->mnoc_bus.src, axi_port->mnoc_bus.dst,
+		axi_port->camnoc_bus.src, axi_port->camnoc_bus.dst,
+		camnoc_bw, mnoc_bw);
+
+	rc = cam_cpas_util_vote_bus_client_bw(&axi_port->mnoc_bus,
+		mnoc_bw, mnoc_bw, false);
+	if (rc) {
+		CAM_ERR(CAM_CPAS,
+			"Failed in mnoc vote ab[%llu] ib[%llu] rc=%d",
+			mnoc_bw, mnoc_bw, rc);
+		goto unlock_axi_port;
+	}
+
+	if (soc_private->axi_camnoc_based) {
+		rc = cam_cpas_util_vote_bus_client_bw(&axi_port->camnoc_bus,
+			0, camnoc_bw, true);
+		if (rc) {
+			CAM_ERR(CAM_CPAS,
+				"Failed camnoc vote ab[%llu] ib[%llu] rc=%d",
+				(uint64_t)0, camnoc_bw, rc);
+			goto unlock_axi_port;
+		}
+	}
+
+	mutex_unlock(&axi_port->lock);
+
+	rc = cam_cpas_util_set_camnoc_axi_clk_rate(cpas_hw);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "Failed in setting axi clk rate rc=%d", rc);
+
+	return rc;
+
+unlock_axi_port:
+	mutex_unlock(&axi_port->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_axi_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_axi_vote *client_axi_vote)
+{
+	struct cam_axi_vote axi_vote;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!client_axi_vote) {
+		CAM_ERR(CAM_CPAS, "Invalid arg client_handle=%d",
+			client_handle);
+		return -EINVAL;
+	}
+
+	axi_vote = *client_axi_vote;
+
+	if ((axi_vote.compressed_bw == 0) &&
+		(axi_vote.uncompressed_bw == 0)) {
+		CAM_DBG(CAM_CPAS, "0 vote from client_handle=%d",
+			client_handle);
+		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CAM_DBG(CAM_PERF,
+		"Client=[%d][%s][%d] Requested compressed[%llu], uncompressed[%llu]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, axi_vote.compressed_bw,
+		axi_vote.uncompressed_bw);
+
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+		cpas_core->cpas_client[client_indx], &axi_vote);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_util_get_ahb_level(struct cam_hw_info *cpas_hw,
+	struct device *dev, unsigned long freq, enum cam_vote_level *req_level)
+{
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+	struct dev_pm_opp *opp;
+	unsigned int corner;
+	enum cam_vote_level level = CAM_SVS_VOTE;
+	unsigned long corner_freq = freq;
+	int i;
+
+	if (!dev || !req_level) {
+		CAM_ERR(CAM_CPAS, "Invalid params %pK, %pK", dev, req_level);
+		return -EINVAL;
+	}
+
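+	/*
+	 * Map the requested frequency to an OPP entry, read back its voltage
+	 * corner, then translate the corner to an AHB vote level through the
+	 * DT-provided vdd-ahb mapping.
+	 */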
+	opp = dev_pm_opp_find_freq_ceil(dev, &corner_freq);
+	if (IS_ERR(opp)) {
+		CAM_DBG(CAM_CPAS, "OPP Ceil not available for freq :%ld, %pK",
+			corner_freq, opp);
+		*req_level = CAM_TURBO_VOTE;
+		return 0;
+	}
+
+	corner = dev_pm_opp_get_voltage(opp);
+
+	for (i = 0; i < soc_private->num_vdd_ahb_mapping; i++)
+		if (corner == soc_private->vdd_ahb[i].vdd_corner)
+			level = soc_private->vdd_ahb[i].ahb_level;
+
+	CAM_DBG(CAM_CPAS,
+		"From OPP table : freq=[%ld][%ld], corner=%d, level=%d",
+		freq, corner_freq, corner, level);
+
+	*req_level = level;
+
+	return 0;
+}
+
+static int cam_cpas_util_apply_client_ahb_vote(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_client *cpas_client, struct cam_ahb_vote *ahb_vote,
+	enum cam_vote_level *applied_level)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_bus_client *ahb_bus_client = &cpas_core->ahb_bus_client;
+	enum cam_vote_level required_level;
+	enum cam_vote_level highest_level;
+	int i, rc = 0;
+
+	if (!ahb_bus_client->valid) {
+		CAM_ERR(CAM_CPAS, "AHB Bus client not valid");
+		return -EINVAL;
+	}
+
+	if (ahb_vote->type == CAM_VOTE_DYNAMIC) {
+		rc = cam_cpas_util_get_ahb_level(cpas_hw, cpas_client->data.dev,
+			ahb_vote->vote.freq, &required_level);
+		if (rc)
+			return rc;
+	} else {
+		required_level = ahb_vote->vote.level;
+	}
+
+	if (cpas_client->ahb_level == required_level)
+		return 0;
+
+	mutex_lock(&ahb_bus_client->lock);
+	cpas_client->ahb_level = required_level;
+
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s] required level[%d], curr_level[%d]",
+		ahb_bus_client->client_id, ahb_bus_client->name,
+		required_level, ahb_bus_client->curr_vote_level);
+
+	if (required_level == ahb_bus_client->curr_vote_level)
+		goto unlock_bus_client;
+
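+	/*
+	 * AHB is a shared resource: apply the highest level requested by
+	 * any registered client, not just the caller's.
+	 */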
+	highest_level = required_level;
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (cpas_core->cpas_client[i] && (highest_level <
+			cpas_core->cpas_client[i]->ahb_level))
+			highest_level = cpas_core->cpas_client[i]->ahb_level;
+	}
+
+	CAM_DBG(CAM_CPAS, "Required highest_level[%d]", highest_level);
+
+	rc = cam_cpas_util_vote_bus_client_level(ahb_bus_client,
+		highest_level);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "Failed in ahb vote, level=%d, rc=%d",
+			highest_level, rc);
+		goto unlock_bus_client;
+	}
+
+	rc = cam_soc_util_set_clk_rate_level(&cpas_hw->soc_info, highest_level);
+	if (rc) {
+		CAM_ERR(CAM_CPAS,
+			"Failed in scaling clock rate level %d for AHB",
+			highest_level);
+		goto unlock_bus_client;
+	}
+
+	if (applied_level)
+		*applied_level = highest_level;
+
+unlock_bus_client:
+	mutex_unlock(&ahb_bus_client->lock);
+	return rc;
+}
+
+static int cam_cpas_hw_update_ahb_vote(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle, struct cam_ahb_vote *client_ahb_vote)
+{
+	struct cam_ahb_vote ahb_vote;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!client_ahb_vote) {
+		CAM_ERR(CAM_CPAS, "Invalid input arg");
+		return -EINVAL;
+	}
+
+	ahb_vote = *client_ahb_vote;
+
+	if (ahb_vote.vote.level == 0) {
+		CAM_DBG(CAM_CPAS, "0 ahb vote from client %d",
+			client_handle);
+		ahb_vote.type = CAM_VOTE_ABSOLUTE;
+		ahb_vote.vote.level = CAM_SVS_VOTE;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] has not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto unlock_client;
+	}
+
+	CAM_DBG(CAM_PERF,
+		"client=[%d][%s][%d] : type[%d], level[%d], freq[%ld], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, ahb_vote.type,
+		ahb_vote.vote.level, ahb_vote.vote.freq,
+		cpas_core->cpas_client[client_indx]->ahb_level);
+
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw,
+		cpas_core->cpas_client[client_indx], &ahb_vote, NULL);
+
+unlock_client:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_start(void *hw_priv, void *start_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_start *cmd_hw_start;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+	enum cam_vote_level applied_level = CAM_SVS_VOTE;
+	int rc;
+	struct cam_cpas_private_soc *soc_private = NULL;
+
+	if (!hw_priv || !start_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, start_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_start) != arg_size) {
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_cmd_start), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cmd_hw_start = (struct cam_cpas_hw_cmd_start *)start_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_start->client_handle);
+	ahb_vote = cmd_hw_start->ahb_vote;
+	axi_vote = cmd_hw_start->axi_vote;
+
+	if (!ahb_vote || !axi_vote)
+		return -EINVAL;
+
+	if ((ahb_vote->vote.level == 0) || ((axi_vote->compressed_bw == 0) &&
+		(axi_vote->uncompressed_bw == 0))) {
+		CAM_ERR(CAM_CPAS, "Invalid vote ahb[%d], axi[%llu], [%llu]",
+			ahb_vote->vote.level, axi_vote->compressed_bw,
+			axi_vote->uncompressed_bw);
+		return -EINVAL;
+	}
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d] is not registered",
+			client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "client=[%d][%s][%d] is in start state",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto done;
+	}
+
+	CAM_DBG(CAM_CPAS,
+		"AHB :client=[%d][%s][%d] type[%d], level[%d], applied[%d]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index,
+		ahb_vote->type, ahb_vote->vote.level, cpas_client->ahb_level);
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
+		ahb_vote, &applied_level);
+	if (rc)
+		goto done;
+
+	CAM_DBG(CAM_CPAS,
+		"AXI client=[%d][%s][%d] compressed_bw[%llu], uncompressed_bw[%llu]",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, axi_vote->compressed_bw,
+		axi_vote->uncompressed_bw);
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+		cpas_client, axi_vote);
+	if (rc)
+		goto done;
+
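+	/* First streaming client powers up the shared CPAS resources. */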
+	if (cpas_core->streamon_clients == 0) {
+		atomic_set(&cpas_core->irq_count, 1);
+		rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info,
+			applied_level);
+		if (rc) {
+			atomic_set(&cpas_core->irq_count, 0);
+			CAM_ERR(CAM_CPAS, "enable_resorce failed, rc=%d", rc);
+			goto done;
+		}
+
+		if (cpas_core->internal_ops.power_on) {
+			rc = cpas_core->internal_ops.power_on(cpas_hw);
+			if (rc) {
+				atomic_set(&cpas_core->irq_count, 0);
+				cam_cpas_soc_disable_resources(
+					&cpas_hw->soc_info, true, true);
+				CAM_ERR(CAM_CPAS,
+					"failed in power_on settings rc=%d",
+					rc);
+				goto done;
+			}
+		}
+		CAM_DBG(CAM_CPAS, "irq_count=%d\n",
+			atomic_read(&cpas_core->irq_count));
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	}
+
+	cpas_client->started = true;
+	cpas_core->streamon_clients++;
+
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
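+/* wait_event_timeout() condition: true once in-flight IRQ handlers drain. */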
+static int _check_irq_count(struct cam_cpas *cpas_core)
+{
+	return (atomic_read(&cpas_core->irq_count) > 0) ? 0 : 1;
+}
+
+static int cam_cpas_hw_stop(void *hw_priv, void *stop_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	uint32_t client_indx;
+	struct cam_cpas_hw_cmd_stop *cmd_hw_stop;
+	struct cam_cpas_client *cpas_client;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	struct cam_cpas_private_soc *soc_private = NULL;
+	int rc = 0;
+	long result;
+
+	if (!hw_priv || !stop_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, stop_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_cmd_stop) != arg_size) {
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_cmd_stop), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cmd_hw_stop = (struct cam_cpas_hw_cmd_stop *)stop_args;
+	client_indx = CAM_CPAS_GET_CLIENT_IDX(cmd_hw_stop->client_handle);
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	CAM_DBG(CAM_CPAS, "Client=[%d][%s][%d] streamon_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->streamon_clients);
+
+	if (!CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not started",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cpas_client->started = false;
+	cpas_core->streamon_clients--;
+
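+	/* Last streaming client tears the shared resources back down. */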
+	if (cpas_core->streamon_clients == 0) {
+		if (cpas_core->internal_ops.power_off) {
+			rc = cpas_core->internal_ops.power_off(cpas_hw);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"failed in power_off settings rc=%d",
+					rc);
+				/* Do not return error, passthrough */
+			}
+		}
+
+		rc = cam_cpas_soc_disable_irq(&cpas_hw->soc_info);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "disable_irq failed, rc=%d", rc);
+			goto done;
+		}
+
+		/*
+		 * Drop the reference taken at stream-on and wait for any
+		 * in-flight IRQ handlers to finish before disabling clocks.
+		 */
+		atomic_dec(&cpas_core->irq_count);
+		result = wait_event_timeout(cpas_core->irq_count_wq,
+			_check_irq_count(cpas_core), HZ);
+		if (result == 0) {
+			CAM_ERR(CAM_CPAS, "Wait failed: irq_count=%d",
+				atomic_read(&cpas_core->irq_count));
+		}
+
+		rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info,
+			true, false);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "disable_resorce failed, rc=%d", rc);
+			goto done;
+		}
+		CAM_DBG(CAM_CPAS, "Disabled all the resources: irq_count=%d\n",
+			atomic_read(&cpas_core->irq_count));
+		cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SUSPEND_VOTE;
+	rc = cam_cpas_util_apply_client_ahb_vote(cpas_hw, cpas_client,
+		&ahb_vote, NULL);
+	if (rc)
+		goto done;
+
+	axi_vote.uncompressed_bw = 0;
+	axi_vote.compressed_bw = 0;
+	rc = cam_cpas_util_apply_client_axi_vote(cpas_hw,
+		cpas_client, &axi_vote);
+
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_init(void *hw_priv, void *init_hw_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	int rc = 0;
+
+	if (!hw_priv || !init_hw_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, init_hw_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		CAM_ERR(CAM_CPAS, "INIT HW size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_core->internal_ops.init_hw_version) {
+		rc = cpas_core->internal_ops.init_hw_version(cpas_hw,
+			(struct cam_cpas_hw_caps *)init_hw_args);
+	}
+
+	return rc;
+}
+
+static int cam_cpas_hw_register_client(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+	struct cam_cpas_client *cpas_client;
+	char client_name[CAM_HW_IDENTIFIER_LENGTH + 3];
+	int32_t client_indx = -1;
+	struct cam_cpas *cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+	struct cam_cpas_private_soc *soc_private =
+		(struct cam_cpas_private_soc *) cpas_hw->soc_info.soc_private;
+
+	CAM_DBG(CAM_CPAS, "Register params : identifier=%s, cell_index=%d",
+		register_params->identifier, register_params->cell_index);
+
+	if (soc_private->client_id_based)
+		snprintf(client_name, sizeof(client_name), "%s%d",
+			register_params->identifier,
+			register_params->cell_index);
+	else
+		snprintf(client_name, sizeof(client_name), "%s",
+			register_params->identifier);
+
+	mutex_lock(&cpas_hw->hw_mutex);
+
+	rc = cam_common_util_get_string_index(soc_private->client_name,
+		soc_private->num_clients, client_name, &client_indx);
+	if (rc || !CAM_CPAS_CLIENT_VALID(client_indx)) {
+		CAM_ERR(CAM_CPAS, "Invalid client %s %d, rc=%d",
+			register_params->identifier,
+			register_params->cell_index, rc);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EPERM;
+	}
+
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+
+	if (CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client %s %d already registered : %pK",
+			register_params->identifier,
+			register_params->cell_index,
+			cpas_core->cpas_client[client_indx]);
+		mutex_unlock(&cpas_core->client_mutex[client_indx]);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EPERM;
+	}
+
+	cpas_client = kzalloc(sizeof(struct cam_cpas_client), GFP_KERNEL);
+	if (!cpas_client) {
+		mutex_unlock(&cpas_core->client_mutex[client_indx]);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -ENOMEM;
+	}
+
+	rc = cam_cpas_util_insert_client_to_axi_port(cpas_core, soc_private,
+		cpas_client, client_indx);
+	if (rc) {
+		CAM_ERR(CAM_CPAS,
+			"axi_port_insert failed Client=[%d][%s][%d], rc=%d",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index, rc);
+		kfree(cpas_client);
+		mutex_unlock(&cpas_core->client_mutex[client_indx]);
+		mutex_unlock(&cpas_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	register_params->client_handle =
+		CAM_CPAS_GET_CLIENT_HANDLE(client_indx);
+	memcpy(&cpas_client->data, register_params,
+		sizeof(struct cam_cpas_register_params));
+	cpas_core->cpas_client[client_indx] = cpas_client;
+	cpas_core->registered_clients++;
+
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->registered_clients);
+
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+
+	return 0;
+}
+
+static int cam_cpas_hw_unregister_client(struct cam_hw_info *cpas_hw,
+	uint32_t client_handle)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_cpas_client *cpas_client = NULL;
+	uint32_t client_indx = CAM_CPAS_GET_CLIENT_IDX(client_handle);
+	int rc = 0;
+
+	if (!CAM_CPAS_CLIENT_VALID(client_indx))
+		return -EINVAL;
+
+	mutex_lock(&cpas_hw->hw_mutex);
+	mutex_lock(&cpas_core->client_mutex[client_indx]);
+	cpas_client = cpas_core->cpas_client[client_indx];
+
+	if (!CAM_CPAS_CLIENT_REGISTERED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client=[%d] not registered", client_indx);
+		rc = -EPERM;
+		goto done;
+	}
+
+	if (CAM_CPAS_CLIENT_STARTED(cpas_core, client_indx)) {
+		CAM_ERR(CAM_CPAS, "Client=[%d][%s][%d] is not stopped",
+			client_indx, cpas_client->data.identifier,
+			cpas_client->data.cell_index);
+		rc = -EPERM;
+		goto done;
+	}
+
+	cam_cpas_util_remove_client_from_axi_port(
+		cpas_core->cpas_client[client_indx]);
+
+	CAM_DBG(CAM_CPAS, "client=[%d][%s][%d], registered_clients=%d",
+		client_indx, cpas_client->data.identifier,
+		cpas_client->data.cell_index, cpas_core->registered_clients);
+
+	kfree(cpas_core->cpas_client[client_indx]);
+	cpas_core->cpas_client[client_indx] = NULL;
+	cpas_core->registered_clients--;
+done:
+	mutex_unlock(&cpas_core->client_mutex[client_indx]);
+	mutex_unlock(&cpas_hw->hw_mutex);
+	return rc;
+}
+
+static int cam_cpas_hw_get_hw_info(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_cpas_hw_caps *hw_caps;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_cpas_hw_caps) != arg_size) {
+		CAM_ERR(CAM_CPAS, "HW_CAPS size mismatch %zd %d",
+			sizeof(struct cam_cpas_hw_caps), arg_size);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)hw_priv;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	hw_caps = (struct cam_cpas_hw_caps *)get_hw_cap_args;
+
+	*hw_caps = cpas_core->hw_caps;
+
+	return 0;
+}
+
+
+static int cam_cpas_hw_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_CPAS_HW_CMD_INVALID)) {
+		CAM_ERR(CAM_CPAS, "Invalid arguments %pK %pK %d",
+			hw_priv, cmd_args, cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_CPAS_HW_CMD_REGISTER_CLIENT: {
+		struct cam_cpas_register_params *register_params;
+
+		if (sizeof(struct cam_cpas_register_params) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		register_params = (struct cam_cpas_register_params *)cmd_args;
+		rc = cam_cpas_hw_register_client(hw_priv, register_params);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_UNREGISTER_CLIENT: {
+		uint32_t *client_handle;
+
+		if (sizeof(uint32_t) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		client_handle = (uint32_t *)cmd_args;
+		rc = cam_cpas_hw_unregister_client(hw_priv, *client_handle);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_WRITE: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_write;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_write =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_write(hw_priv, reg_write->client_handle,
+			reg_write->reg_base, reg_write->offset, reg_write->mb,
+			reg_write->value);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_REG_READ: {
+		struct cam_cpas_hw_cmd_reg_read_write *reg_read;
+
+		if (sizeof(struct cam_cpas_hw_cmd_reg_read_write) !=
+			arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		reg_read =
+			(struct cam_cpas_hw_cmd_reg_read_write *)cmd_args;
+		rc = cam_cpas_hw_reg_read(hw_priv,
+			reg_read->client_handle, reg_read->reg_base,
+			reg_read->offset, reg_read->mb, &reg_read->value);
+
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AHB_VOTE: {
+		struct cam_cpas_hw_cmd_ahb_vote *cmd_ahb_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_ahb_vote) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_ahb_vote = (struct cam_cpas_hw_cmd_ahb_vote *)cmd_args;
+		rc = cam_cpas_hw_update_ahb_vote(hw_priv,
+			cmd_ahb_vote->client_handle, cmd_ahb_vote->ahb_vote);
+		break;
+	}
+	case CAM_CPAS_HW_CMD_AXI_VOTE: {
+		struct cam_cpas_hw_cmd_axi_vote *cmd_axi_vote;
+
+		if (sizeof(struct cam_cpas_hw_cmd_axi_vote) != arg_size) {
+			CAM_ERR(CAM_CPAS, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_axi_vote = (struct cam_cpas_hw_cmd_axi_vote *)cmd_args;
+		rc = cam_cpas_hw_update_axi_vote(hw_priv,
+			cmd_axi_vote->client_handle, cmd_axi_vote->axi_vote);
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CPAS, "CPAS HW command not valid =%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_util_client_setup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
+		mutex_init(&cpas_core->client_mutex[i]);
+		cpas_core->cpas_client[i] = NULL;
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_client_cleanup(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	int i;
+
+	for (i = 0; i < CAM_CPAS_MAX_CLIENTS; i++) {
+		if (cpas_core->cpas_client[i]) {
+			cam_cpas_hw_unregister_client(cpas_hw, i);
+			cpas_core->cpas_client[i] = NULL;
+		}
+		mutex_destroy(&cpas_core->client_mutex[i]);
+	}
+
+	return 0;
+}
+
+static int cam_cpas_util_get_internal_ops(struct platform_device *pdev,
+	struct cam_hw_intf *hw_intf, struct cam_cpas_internal_ops *internal_ops)
+{
+	struct device_node *of_node = pdev->dev.of_node;
+	int rc;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed to get arch-compat rc=%d", rc);
+		return -EINVAL;
+	}
+
+	if (strnstr(compat_str, "camss_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CAMSSTOP;
+		rc = cam_camsstop_get_internal_ops(internal_ops);
+	} else if (strnstr(compat_str, "cpas_top", strlen(compat_str))) {
+		hw_intf->hw_type = CAM_HW_CPASTOP;
+		rc = cam_cpastop_get_internal_ops(internal_ops);
+	} else {
+		CAM_ERR(CAM_CPAS, "arch-compat %s not supported", compat_str);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_info *cpas_hw = NULL;
+	struct cam_hw_intf *cpas_hw_intf = NULL;
+	struct cam_cpas *cpas_core = NULL;
+	struct cam_cpas_private_soc *soc_private;
+	struct cam_cpas_internal_ops *internal_ops;
+
+	cpas_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!cpas_hw_intf)
+		return -ENOMEM;
+
+	cpas_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!cpas_hw) {
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	cpas_core = kzalloc(sizeof(struct cam_cpas), GFP_KERNEL);
+	if (!cpas_core) {
+		kfree(cpas_hw);
+		kfree(cpas_hw_intf);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < CAM_CPAS_REG_MAX; i++)
+		cpas_core->regbase_index[i] = -1;
+
+	cpas_hw_intf->hw_priv = cpas_hw;
+	cpas_hw->core_info = cpas_core;
+
+	cpas_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	cpas_hw->soc_info.pdev = pdev;
+	cpas_hw->soc_info.dev = &pdev->dev;
+	cpas_hw->soc_info.dev_name = pdev->name;
+	cpas_hw->open_count = 0;
+	mutex_init(&cpas_hw->hw_mutex);
+	spin_lock_init(&cpas_hw->hw_lock);
+	init_completion(&cpas_hw->hw_complete);
+
+	cpas_hw_intf->hw_ops.get_hw_caps = cam_cpas_hw_get_hw_info;
+	cpas_hw_intf->hw_ops.init = cam_cpas_hw_init;
+	cpas_hw_intf->hw_ops.deinit = NULL;
+	cpas_hw_intf->hw_ops.reset = NULL;
+	cpas_hw_intf->hw_ops.reserve = NULL;
+	cpas_hw_intf->hw_ops.release = NULL;
+	cpas_hw_intf->hw_ops.start = cam_cpas_hw_start;
+	cpas_hw_intf->hw_ops.stop = cam_cpas_hw_stop;
+	cpas_hw_intf->hw_ops.read = NULL;
+	cpas_hw_intf->hw_ops.write = NULL;
+	cpas_hw_intf->hw_ops.process_cmd = cam_cpas_hw_process_cmd;
+
+	cpas_core->work_queue = alloc_workqueue("cam-cpas",
+		WQ_UNBOUND | WQ_MEM_RECLAIM, CAM_CPAS_INFLIGHT_WORKS);
+	if (!cpas_core->work_queue) {
+		rc = -ENOMEM;
+		goto release_mem;
+	}
+
+	internal_ops = &cpas_core->internal_ops;
+	rc = cam_cpas_util_get_internal_ops(pdev, cpas_hw_intf, internal_ops);
+	if (rc)
+		goto release_workq;
+
+	rc = cam_cpas_soc_init_resources(&cpas_hw->soc_info,
+		internal_ops->handle_irq, cpas_hw);
+	if (rc)
+		goto release_workq;
+
+	soc_private = (struct cam_cpas_private_soc *)
+		cpas_hw->soc_info.soc_private;
+	cpas_core->num_clients = soc_private->num_clients;
+	atomic_set(&cpas_core->irq_count, 0);
+	init_waitqueue_head(&cpas_core->irq_count_wq);
+
+	if (internal_ops->setup_regbase) {
+		rc = internal_ops->setup_regbase(&cpas_hw->soc_info,
+			cpas_core->regbase_index, CAM_CPAS_REG_MAX);
+		if (rc)
+			goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_client_setup(cpas_hw);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in client setup, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_cpas_util_register_bus_client(&cpas_hw->soc_info,
+		cpas_hw->soc_info.pdev->dev.of_node,
+		&cpas_core->ahb_bus_client);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in ahb setup, rc=%d", rc);
+		goto client_cleanup;
+	}
+
+	rc = cam_cpas_util_axi_setup(cpas_core, &cpas_hw->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in axi setup, rc=%d", rc);
+		goto ahb_cleanup;
+	}
+
+	/* Need to vote first before enabling clocks */
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, true);
+	if (rc)
+		goto axi_cleanup;
+
+	rc = cam_cpas_soc_enable_resources(&cpas_hw->soc_info, CAM_SVS_VOTE);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in soc_enable_resources, rc=%d", rc);
+		goto remove_default_vote;
+	}
+
+	if (internal_ops->get_hw_info) {
+		rc = internal_ops->get_hw_info(cpas_hw, &cpas_core->hw_caps);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "failed in get_hw_info, rc=%d", rc);
+			goto disable_soc_res;
+		}
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_info");
+		rc = -EINVAL;
+		goto disable_soc_res;
+	}
+
+	rc = cam_cpas_hw_init(cpas_hw_intf->hw_priv,
+		&cpas_core->hw_caps, sizeof(struct cam_cpas_hw_caps));
+	if (rc)
+		goto disable_soc_res;
+
+	rc = cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in soc_disable_resources, rc=%d", rc);
+		goto remove_default_vote;
+	}
+
+	rc = cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+	if (rc)
+		goto axi_cleanup;
+
+	*hw_intf = cpas_hw_intf;
+	return 0;
+
+disable_soc_res:
+	cam_cpas_soc_disable_resources(&cpas_hw->soc_info, true, true);
+remove_default_vote:
+	cam_cpas_util_vote_default_ahb_axi(cpas_hw, false);
+axi_cleanup:
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+ahb_cleanup:
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+client_cleanup:
+	cam_cpas_util_client_cleanup(cpas_hw);
+deinit_platform_res:
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+release_workq:
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+release_mem:
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+	CAM_ERR(CAM_CPAS, "failed in hw probe");
+	return rc;
+}
+
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf)
+{
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+
+	if (!cpas_hw_intf) {
+		CAM_ERR(CAM_CPAS, "cpas interface not initialized");
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *)cpas_hw_intf->hw_priv;
+	cpas_core = (struct cam_cpas *)cpas_hw->core_info;
+
+	if (cpas_hw->hw_state == CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_CPAS, "cpas hw is in power up state");
+		return -EINVAL;
+	}
+
+	cam_cpas_util_axi_cleanup(cpas_core, &cpas_hw->soc_info);
+	cam_cpas_util_unregister_bus_client(&cpas_core->ahb_bus_client);
+	cam_cpas_util_client_cleanup(cpas_hw);
+	cam_cpas_soc_deinit_resources(&cpas_hw->soc_info);
+	flush_workqueue(cpas_core->work_queue);
+	destroy_workqueue(cpas_core->work_queue);
+	mutex_destroy(&cpas_hw->hw_mutex);
+	kfree(cpas_core);
+	kfree(cpas_hw);
+	kfree(cpas_hw_intf);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
new file mode 100644
index 0000000..e30ba7a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw.h
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_HW_H_
+#define _CAM_CPAS_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_common_util.h"
+
+#define CAM_CPAS_MAX_CLIENTS 30
+#define CAM_CPAS_INFLIGHT_WORKS 5
+
+#define CAM_CPAS_AXI_MIN_MNOC_AB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_MNOC_IB_BW   (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_AB_BW (2048 * 1024)
+#define CAM_CPAS_AXI_MIN_CAMNOC_IB_BW (3000000000UL)
+
+#define CAM_CPAS_GET_CLIENT_IDX(handle) (handle)
+#define CAM_CPAS_GET_CLIENT_HANDLE(indx) (indx)
+
+#define CAM_CPAS_CLIENT_VALID(indx) \
+	(((indx) >= 0) && ((indx) < CAM_CPAS_MAX_CLIENTS))
+#define CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx) \
+	(CAM_CPAS_CLIENT_VALID(indx) && \
+	((cpas_core)->cpas_client[indx]))
+#define CAM_CPAS_CLIENT_STARTED(cpas_core, indx) \
+	(CAM_CPAS_CLIENT_REGISTERED(cpas_core, indx) && \
+	((cpas_core)->cpas_client[indx]->started))
+
+/**
+ * enum cam_cpas_access_type - Enum for Register access type
+ */
+enum cam_cpas_access_type {
+	CAM_REG_TYPE_READ,
+	CAM_REG_TYPE_WRITE,
+	CAM_REG_TYPE_READ_WRITE,
+};
+
+/**
+ * struct cam_cpas_internal_ops - CPAS Hardware layer internal ops
+ *
+ * @get_hw_info: Function pointer for get hw info
+ * @init_hw_version: Function pointer for hw init based on version
+ * @handle_irq: Function pointer for IRQ handling
+ * @setup_regbase: Function pointer for setting up regbase indices
+ * @power_on: Function pointer for hw core specific power on settings
+ * @power_off: Function pointer for hw core specific power off settings
+ *
+ */
+struct cam_cpas_internal_ops {
+	int (*get_hw_info)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	int (*init_hw_version)(struct cam_hw_info *cpas_hw,
+		struct cam_cpas_hw_caps *hw_caps);
+	irqreturn_t (*handle_irq)(int irq_num, void *data);
+	int (*setup_regbase)(struct cam_hw_soc_info *soc_info,
+		int32_t regbase_index[], int32_t num_reg_map);
+	int (*power_on)(struct cam_hw_info *cpas_hw);
+	int (*power_off)(struct cam_hw_info *cpas_hw);
+};
+
+/**
+ * struct cam_cpas_reg : CPAS register info
+ *
+ * @enable: Whether this reg info needs to be enabled
+ * @access_type: Register access type
+ * @masked_value: Whether this register write/read is based on mask, shift
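+ * @offset: Register offset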
+ * @mask: Mask for this register value
+ * @shift: Shift for this register value
+ * @value: Register value
+ *
+ */
+struct cam_cpas_reg {
+	bool enable;
+	enum cam_cpas_access_type access_type;
+	bool masked_value;
+	uint32_t offset;
+	uint32_t mask;
+	uint32_t shift;
+	uint32_t value;
+};
+
+/**
+ * struct cam_cpas_client : CPAS Client structure info
+ *
+ * @data: Client register params
+ * @started: Whether client has streamed on
+ * @ahb_level: Determined/Applied ahb level for the client
+ * @axi_vote: Determined/Applied axi vote for the client
+ * @axi_port: Client's parent axi port
+ * @axi_sibling_client: Client's siblings sharing the same axi port
+ *
+ */
+struct cam_cpas_client {
+	struct cam_cpas_register_params data;
+	bool started;
+	enum cam_vote_level ahb_level;
+	struct cam_axi_vote axi_vote;
+	struct cam_cpas_axi_port *axi_port;
+	struct list_head axi_sibling_client;
+};
+
+/**
+ * struct cam_cpas_bus_client : Bus client information
+ *
+ * @src: Bus master/src id
+ * @dst: Bus slave/dst id
+ * @pdata: Bus pdata information
+ * @client_id: Bus client id
+ * @num_usecases: Number of use cases for this client
+ * @num_paths: Number of paths for this client
+ * @curr_vote_level: current voted index
+ * @dyn_vote: Whether dynamic voting enabled
+ * @lock: Mutex lock used while voting on this client
+ * @valid: Whether bus client is valid
+ * @name: Name of the bus client
+ *
+ */
+struct cam_cpas_bus_client {
+	int src;
+	int dst;
+	struct msm_bus_scale_pdata *pdata;
+	uint32_t client_id;
+	int num_usecases;
+	int num_paths;
+	unsigned int curr_vote_level;
+	bool dyn_vote;
+	struct mutex lock;
+	bool valid;
+	const char *name;
+};
+
+/**
+ * struct cam_cpas_axi_port : AXI port information
+ *
+ * @sibling_port: Sibling AXI ports
+ * @clients_list_head: List head pointing to list of clients sharing this port
+ * @lock: Mutex lock for accessing this port
+ * @camnoc_bus: CAMNOC bus client info for this port
+ * @mnoc_bus: MNOC bus client info for this port
+ * @axi_port_name: Name of this AXI port
+ * @axi_port_node: Node representing this AXI Port
+ * @axi_port_mnoc_node: Node representing mnoc in this AXI Port
+ * @axi_port_camnoc_node: Node representing camnoc in this AXI Port
+ * @consolidated_axi_vote: Consolidated axi bw values for this AXI port
+ */
+struct cam_cpas_axi_port {
+	struct list_head sibling_port;
+	struct list_head clients_list_head;
+	struct mutex lock;
+	struct cam_cpas_bus_client camnoc_bus;
+	struct cam_cpas_bus_client mnoc_bus;
+	const char *axi_port_name;
+	struct device_node *axi_port_node;
+	struct device_node *axi_port_mnoc_node;
+	struct device_node *axi_port_camnoc_node;
+	struct cam_axi_vote consolidated_axi_vote;
+};
+
+/**
+ * struct cam_cpas : CPAS core data structure info
+ *
+ * @hw_caps: CPAS hw capabilities
+ * @cpas_client: Array of pointers to CPAS clients info
+ * @client_mutex: Mutex for accessing client info
+ * @num_clients: Total number of clients that CPAS supports
+ * @registered_clients: Number of Clients registered currently
+ * @streamon_clients: Number of Clients that are in start state currently
+ * @regbase_index: Register base indices for CPAS register base IDs
+ * @ahb_bus_client: AHB Bus client info
+ * @axi_ports_list_head: Head pointing to list of AXI ports
+ * @internal_ops: CPAS HW internal ops
+ * @work_queue: Work queue handle
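+ * @irq_count: In-flight IRQ handler count (biased by 1 while streaming)
+ * @irq_count_wq: Wait queue used at stop to drain in-flight IRQ handlers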
+ *
+ */
+struct cam_cpas {
+	struct cam_cpas_hw_caps hw_caps;
+	struct cam_cpas_client *cpas_client[CAM_CPAS_MAX_CLIENTS];
+	struct mutex client_mutex[CAM_CPAS_MAX_CLIENTS];
+	uint32_t num_clients;
+	uint32_t registered_clients;
+	uint32_t streamon_clients;
+	int32_t regbase_index[CAM_CPAS_REG_MAX];
+	struct cam_cpas_bus_client ahb_bus_client;
+	struct list_head axi_ports_list_head;
+	struct cam_cpas_internal_ops internal_ops;
+	struct workqueue_struct *work_queue;
+	atomic_t irq_count;
+	wait_queue_head_t irq_count_wq;
+};
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops);
+
+int cam_cpas_util_reg_update(struct cam_hw_info *cpas_hw,
+	enum cam_cpas_reg_base reg_base, struct cam_cpas_reg *reg_info);
+
+#endif /* _CAM_CPAS_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
new file mode 100644
index 0000000..1d696a0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_hw_intf.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_HW_INTF_H_
+#define _CAM_CPAS_HW_INTF_H_
+
+#include <linux/platform_device.h>
+
+#include "cam_cpas_api.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_debug_util.h"
+
+/* Number of times to retry while polling */
+#define CAM_CPAS_POLL_RETRY_CNT 5
+/* Minimum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MIN_USECS 200
+/* Maximum usecs to sleep while polling */
+#define CAM_CPAS_POLL_MAX_USECS 250
+
+/**
+ * enum cam_cpas_hw_type - Enum for CPAS HW type
+ */
+enum cam_cpas_hw_type {
+	CAM_HW_CPASTOP,
+	CAM_HW_CAMSSTOP,
+};
+
+/**
+ * enum cam_cpas_hw_cmd_process - Enum for CPAS HW process command type
+ */
+enum cam_cpas_hw_cmd_process {
+	CAM_CPAS_HW_CMD_REGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+	CAM_CPAS_HW_CMD_REG_WRITE,
+	CAM_CPAS_HW_CMD_REG_READ,
+	CAM_CPAS_HW_CMD_AHB_VOTE,
+	CAM_CPAS_HW_CMD_AXI_VOTE,
+	CAM_CPAS_HW_CMD_INVALID,
+};
+
+/**
+ * struct cam_cpas_hw_cmd_reg_read_write : CPAS cmd struct for reg read, write
+ *
+ * @client_handle: Client handle
+ * @reg_base: Register base type
+ * @offset: Register offset
+ * @value: Register value
+ * @mb: Whether to do operation with memory barrier
+ *
+ */
+struct cam_cpas_hw_cmd_reg_read_write {
+	uint32_t client_handle;
+	enum cam_cpas_reg_base reg_base;
+	uint32_t offset;
+	uint32_t value;
+	bool mb;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_ahb_vote : CPAS cmd struct for AHB vote
+ *
+ * @client_handle: Client handle
+ * @ahb_vote: AHB voting info
+ *
+ */
+struct cam_cpas_hw_cmd_ahb_vote {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_axi_vote : CPAS cmd struct for AXI vote
+ *
+ * @client_handle: Client handle
+ * @axi_vote: axi bandwidth vote
+ *
+ */
+struct cam_cpas_hw_cmd_axi_vote {
+	uint32_t client_handle;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_start : CPAS cmd struct for start
+ *
+ * @client_handle: Client handle
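+ * @ahb_vote: AHB vote to apply on start
+ * @axi_vote: AXI bandwidth vote to apply on start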
+ *
+ */
+struct cam_cpas_hw_cmd_start {
+	uint32_t client_handle;
+	struct cam_ahb_vote *ahb_vote;
+	struct cam_axi_vote *axi_vote;
+};
+
+/**
+ * struct cam_cpas_hw_cmd_stop : CPAS cmd struct for stop
+ *
+ * @client_handle: Client handle
+ *
+ */
+struct cam_cpas_hw_cmd_stop {
+	uint32_t client_handle;
+};
+
+/**
+ * struct cam_cpas_hw_caps : CPAS HW capabilities
+ *
+ * @camera_family: Camera family type
+ * @camera_version: Camera version
+ * @cpas_version: CPAS version
+ * @camera_capability: Camera hw capabilities
+ *
+ */
+struct cam_cpas_hw_caps {
+	uint32_t camera_family;
+	struct cam_hw_version camera_version;
+	struct cam_hw_version cpas_version;
+	uint32_t camera_capability;
+};
+
+int cam_cpas_hw_probe(struct platform_device *pdev,
+	struct cam_hw_intf **hw_intf);
+int cam_cpas_hw_remove(struct cam_hw_intf *cpas_hw_intf);
+
+#endif /* _CAM_CPAS_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
new file mode 100644
index 0000000..c2153ef
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_intf.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_cpas_hw_intf.h"
+
+#define CAM_CPAS_DEV_NAME    "cam-cpas"
+#define CAM_CPAS_INTF_INITIALIZED() (g_cpas_intf && g_cpas_intf->probe_done)
+
+/**
+ * struct cam_cpas_intf : CPAS interface
+ *
+ * @pdev: Platform device
+ * @subdev: Subdev info
+ * @hw_intf: CPAS HW interface
+ * @hw_caps: CPAS HW capabilities
+ * @intf_lock: CPAS interface mutex
+ * @open_cnt: CPAS subdev open count
+ * @probe_done: Whether CPAS probe completed
+ *
+ */
+struct cam_cpas_intf {
+	struct platform_device *pdev;
+	struct cam_subdev subdev;
+	struct cam_hw_intf *hw_intf;
+	struct cam_cpas_hw_caps hw_caps;
+	struct mutex intf_lock;
+	uint32_t open_cnt;
+	bool probe_done;
+};
+
+static struct cam_cpas_intf *g_cpas_intf;
+
+int cam_cpas_get_cpas_hw_version(uint32_t *hw_version)
+{
+	struct cam_hw_info *cpas_hw = NULL;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!hw_version) {
+		CAM_ERR(CAM_CPAS, "invalid input %pK", hw_version);
+		return -EINVAL;
+	}
+
+	cpas_hw = (struct cam_hw_info *) g_cpas_intf->hw_intf->hw_priv;
+
+	*hw_version = cpas_hw->soc_info.hw_version;
+
+	if (*hw_version == CAM_CPAS_TITAN_NONE) {
+		CAM_DBG(CAM_CPAS, "Didn't find a valid HW Version %d",
+			*hw_version);
+	}
+
+	return 0;
+}
+
+
+int cam_cpas_get_hw_info(uint32_t *camera_family,
+	struct cam_hw_version *camera_version,
+	struct cam_hw_version *cpas_version,
+	uint32_t *cam_caps)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!camera_family || !camera_version || !cpas_version || !cam_caps) {
+		CAM_ERR(CAM_CPAS, "invalid input %pK %pK %pK %pK",
+			camera_family, camera_version, cpas_version, cam_caps);
+		return -EINVAL;
+	}
+
+	*camera_family = g_cpas_intf->hw_caps.camera_family;
+	*camera_version = g_cpas_intf->hw_caps.camera_version;
+	*cpas_version = g_cpas_intf->hw_caps.cpas_version;
+	*cam_caps = g_cpas_intf->hw_caps.camera_capability;
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_cpas_get_hw_info);
+
+int cam_cpas_reg_write(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_write;
+
+		cmd_reg_write.client_handle = client_handle;
+		cmd_reg_write.reg_base = reg_base;
+		cmd_reg_write.offset = offset;
+		cmd_reg_write.value = value;
+		cmd_reg_write.mb = mb;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_WRITE, &cmd_reg_write,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_write);
+
+int cam_cpas_reg_read(uint32_t client_handle,
+	enum cam_cpas_reg_base reg_base, uint32_t offset, bool mb,
+	uint32_t *value)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (!value) {
+		CAM_ERR(CAM_CPAS, "Invalid arg value");
+		return -EINVAL;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_reg_read_write cmd_reg_read;
+
+		cmd_reg_read.client_handle = client_handle;
+		cmd_reg_read.reg_base = reg_base;
+		cmd_reg_read.offset = offset;
+		cmd_reg_read.mb = mb;
+		cmd_reg_read.value = 0;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REG_READ, &cmd_reg_read,
+			sizeof(struct cam_cpas_hw_cmd_reg_read_write));
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+			return rc;
+		}
+
+		*value = cmd_reg_read.value;
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_reg_read);
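+
+/*
+ * Illustrative register read (a sketch; the handle is assumed to come
+ * from a successful cam_cpas_register_client() call and the offset is
+ * hypothetical):
+ *
+ *	uint32_t value;
+ *
+ *	rc = cam_cpas_reg_read(client_handle, CAM_CPAS_REG_CPASTOP, 0x0,
+ *		true, &value);
+ */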
+
+int cam_cpas_update_axi_vote(uint32_t client_handle,
+	struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_axi_vote cmd_axi_vote;
+
+		cmd_axi_vote.client_handle = client_handle;
+		cmd_axi_vote.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AXI_VOTE, &cmd_axi_vote,
+			sizeof(struct cam_cpas_hw_cmd_axi_vote));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_axi_vote);
+
+int cam_cpas_update_ahb_vote(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		struct cam_cpas_hw_cmd_ahb_vote cmd_ahb_vote;
+
+		cmd_ahb_vote.client_handle = client_handle;
+		cmd_ahb_vote.ahb_vote = ahb_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_AHB_VOTE, &cmd_ahb_vote,
+			sizeof(struct cam_cpas_hw_cmd_ahb_vote));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_update_ahb_vote);
+
+int cam_cpas_stop(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.stop) {
+		struct cam_cpas_hw_cmd_stop cmd_hw_stop;
+
+		cmd_hw_stop.client_handle = client_handle;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.stop(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_stop,
+			sizeof(struct cam_cpas_hw_cmd_stop));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in stop, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid stop ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_stop);
+
+int cam_cpas_start(uint32_t client_handle,
+	struct cam_ahb_vote *ahb_vote, struct cam_axi_vote *axi_vote)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.start) {
+		struct cam_cpas_hw_cmd_start cmd_hw_start;
+
+		cmd_hw_start.client_handle = client_handle;
+		cmd_hw_start.ahb_vote = ahb_vote;
+		cmd_hw_start.axi_vote = axi_vote;
+
+		rc = g_cpas_intf->hw_intf->hw_ops.start(
+			g_cpas_intf->hw_intf->hw_priv, &cmd_hw_start,
+			sizeof(struct cam_cpas_hw_cmd_start));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in start, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid start ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_start);
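+
+/*
+ * Illustrative client lifecycle (a sketch; the bandwidth numbers are
+ * hypothetical):
+ *
+ *	struct cam_cpas_register_params params = { ... };
+ *	struct cam_ahb_vote ahb_vote;
+ *	struct cam_axi_vote axi_vote;
+ *
+ *	cam_cpas_register_client(&params);
+ *
+ *	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ *	ahb_vote.vote.level = CAM_SVS_VOTE;
+ *	axi_vote.compressed_bw = 7200000;
+ *	axi_vote.uncompressed_bw = 7200000;
+ *	cam_cpas_start(params.client_handle, &ahb_vote, &axi_vote);
+ *
+ *	... access camera hw, update AHB/AXI votes as needed ...
+ *
+ *	cam_cpas_stop(params.client_handle);
+ *	cam_cpas_unregister_client(params.client_handle);
+ */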
+
+int cam_cpas_unregister_client(uint32_t client_handle)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_UNREGISTER_CLIENT,
+			&client_handle, sizeof(uint32_t));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_unregister_client);
+
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params)
+{
+	int rc;
+
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	if (g_cpas_intf->hw_intf->hw_ops.process_cmd) {
+		rc = g_cpas_intf->hw_intf->hw_ops.process_cmd(
+			g_cpas_intf->hw_intf->hw_priv,
+			CAM_CPAS_HW_CMD_REGISTER_CLIENT, register_params,
+			sizeof(struct cam_cpas_register_params));
+		if (rc)
+			CAM_ERR(CAM_CPAS, "Failed in process_cmd, rc=%d", rc);
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid process_cmd ops");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_cpas_register_client);
+
+int cam_cpas_subdev_cmd(struct cam_cpas_intf *cpas_intf,
+	struct cam_control *cmd)
+{
+	int rc = 0;
+
+	if (!cmd) {
+		CAM_ERR(CAM_CPAS, "Invalid input cmd");
+		return -EINVAL;
+	}
+
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP: {
+		struct cam_cpas_query_cap query;
+
+		if (copy_from_user(&query, u64_to_user_ptr(cmd->handle),
+			sizeof(query))) {
+			CAM_ERR(CAM_CPAS, "Failed in copy from user");
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_cpas_get_hw_info(&query.camera_family,
+			&query.camera_version, &query.cpas_version,
+			&query.reserved);
+		if (rc)
+			break;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle), &query,
+			sizeof(query))) {
+			CAM_ERR(CAM_CPAS, "Failed in copy to user");
+			rc = -EFAULT;
+		}
+
+		break;
+	}
+	case CAM_SD_SHUTDOWN:
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Unknown op code %d for CPAS", cmd->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_cpas_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt++;
+	CAM_DBG(CAM_CPAS, "CPAS Subdev open count %d", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static int cam_cpas_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&cpas_intf->intf_lock);
+	cpas_intf->open_cnt--;
+	CAM_DBG(CAM_CPAS, "CPAS Subdev close count %d", cpas_intf->open_cnt);
+	mutex_unlock(&cpas_intf->intf_lock);
+
+	return 0;
+}
+
+static long cam_cpas_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, (struct cam_control *) arg);
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cpas_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc;
+	struct cam_cpas_intf *cpas_intf = v4l2_get_subdevdata(sd);
+
+	if (!cpas_intf || !cpas_intf->probe_done) {
+		CAM_ERR(CAM_CPAS, "CPAS not initialized");
+		return -ENODEV;
+	}
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CPAS, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_cpas_subdev_cmd(cpas_intf, &cmd_data);
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Invalid command %d for CPAS!", cmd);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_CPAS,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+static struct v4l2_subdev_core_ops cpas_subdev_core_ops = {
+	.ioctl = cam_cpas_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cpas_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops cpas_subdev_ops = {
+	.core = &cpas_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cpas_subdev_intern_ops = {
+	.open = cam_cpas_subdev_open,
+	.close = cam_cpas_subdev_close,
+};
+
+static int cam_cpas_subdev_register(struct platform_device *pdev)
+{
+	int rc;
+	struct cam_subdev *subdev;
+
+	if (!g_cpas_intf)
+		return -EINVAL;
+
+	subdev = &g_cpas_intf->subdev;
+
+	subdev->name = CAM_CPAS_DEV_NAME;
+	subdev->pdev = pdev;
+	subdev->ops = &cpas_subdev_ops;
+	subdev->internal_ops = &cpas_subdev_intern_ops;
+	subdev->token = g_cpas_intf;
+	subdev->sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	subdev->ent_function = CAM_CPAS_DEVICE_TYPE;
+
+	rc = cam_register_subdev(subdev);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed register subdev: %s!",
+			CAM_CPAS_DEV_NAME);
+		return rc;
+	}
+
+	platform_set_drvdata(g_cpas_intf->pdev, g_cpas_intf);
+	return rc;
+}
+
+static int cam_cpas_dev_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_hw_caps *hw_caps;
+	struct cam_hw_intf *hw_intf;
+	int rc;
+
+	if (g_cpas_intf) {
+		CAM_ERR(CAM_CPAS, "cpas dev proble already done");
+		return -EALREADY;
+	}
+
+	g_cpas_intf = kzalloc(sizeof(*g_cpas_intf), GFP_KERNEL);
+	if (!g_cpas_intf)
+		return -ENOMEM;
+
+	mutex_init(&g_cpas_intf->intf_lock);
+	g_cpas_intf->pdev = pdev;
+
+	rc = cam_cpas_hw_probe(pdev, &g_cpas_intf->hw_intf);
+	if (rc || (g_cpas_intf->hw_intf == NULL)) {
+		CAM_ERR(CAM_CPAS, "Failed in hw probe, rc=%d", rc);
+		goto error_destroy_mem;
+	}
+
+	hw_intf = g_cpas_intf->hw_intf;
+	hw_caps = &g_cpas_intf->hw_caps;
+	if (hw_intf->hw_ops.get_hw_caps) {
+		rc = hw_intf->hw_ops.get_hw_caps(hw_intf->hw_priv,
+			hw_caps, sizeof(struct cam_cpas_hw_caps));
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "Failed in get_hw_caps, rc=%d", rc);
+			goto error_hw_remove;
+		}
+	} else {
+		CAM_ERR(CAM_CPAS, "Invalid get_hw_caps ops");
+		rc = -EINVAL;
+		goto error_hw_remove;
+	}
+
+	rc = cam_cpas_subdev_register(pdev);
+	if (rc)
+		goto error_hw_remove;
+
+	g_cpas_intf->probe_done = true;
+	CAM_DBG(CAM_CPAS,
+		"CPAS INTF Probe success %d, %d.%d.%d, %d.%d.%d, 0x%x",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	return rc;
+
+error_hw_remove:
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+error_destroy_mem:
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+	CAM_ERR(CAM_CPAS, "CPAS probe failed");
+	return rc;
+}
+
+static int cam_cpas_dev_remove(struct platform_device *dev)
+{
+	if (!CAM_CPAS_INTF_INITIALIZED()) {
+		CAM_ERR(CAM_CPAS, "cpas intf not initialized");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_cpas_intf->intf_lock);
+	g_cpas_intf->probe_done = false;
+	cam_unregister_subdev(&g_cpas_intf->subdev);
+	cam_cpas_hw_remove(g_cpas_intf->hw_intf);
+	mutex_unlock(&g_cpas_intf->intf_lock);
+	mutex_destroy(&g_cpas_intf->intf_lock);
+	kfree(g_cpas_intf);
+	g_cpas_intf = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id cam_cpas_dt_match[] = {
+	{.compatible = "qcom,cam-cpas"},
+	{}
+};
+
+static struct platform_driver cam_cpas_driver = {
+	.probe = cam_cpas_dev_probe,
+	.remove = cam_cpas_dev_remove,
+	.driver = {
+		.name = CAM_CPAS_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cpas_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_cpas_dev_init_module(void)
+{
+	return platform_driver_register(&cam_cpas_driver);
+}
+
+static void __exit cam_cpas_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_cpas_driver);
+}
+
+module_init(cam_cpas_dev_init_module);
+module_exit(cam_cpas_dev_exit_module);
+MODULE_DESCRIPTION("MSM CPAS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
new file mode 100644
index 0000000..6f54844
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_cpas_get_custom_dt_info(struct platform_device *pdev,
+	struct cam_cpas_private_soc *soc_private)
+{
+	struct device_node *of_node;
+	int count = 0, i = 0, rc = 0;
+
+	if (!soc_private || !pdev) {
+		CAM_ERR(CAM_CPAS, "invalid input arg %pK %pK",
+			soc_private, pdev);
+		return -EINVAL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&soc_private->arch_compat);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "device %s failed to read arch-compat",
+			pdev->name);
+		return rc;
+	}
+
+	soc_private->camnoc_axi_min_ib_bw = 0;
+	rc = of_property_read_u64(of_node,
+		"camnoc-axi-min-ib-bw",
+		&soc_private->camnoc_axi_min_ib_bw);
+	if (rc == -EOVERFLOW) {
+		soc_private->camnoc_axi_min_ib_bw = 0;
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-min-ib-bw",
+			(u32 *)&soc_private->camnoc_axi_min_ib_bw);
+	}
+
+	if (rc) {
+		CAM_DBG(CAM_CPAS,
+			"failed to read camnoc-axi-min-ib-bw rc:%d", rc);
+		soc_private->camnoc_axi_min_ib_bw =
+			CAM_CPAS_AXI_MIN_CAMNOC_IB_BW;
+	}
+
+	CAM_DBG(CAM_CPAS, "camnoc-axi-min-ib-bw = %llu",
+		soc_private->camnoc_axi_min_ib_bw);
+
+	soc_private->client_id_based = of_property_read_bool(of_node,
+		"client-id-based");
+
+	count = of_property_count_strings(of_node, "client-names");
+	if (count <= 0) {
+		CAM_ERR(CAM_CPAS, "no client-names found");
+		count = 0;
+		return -EINVAL;
+	}
+	soc_private->num_clients = count;
+	CAM_DBG(CAM_CPAS,
+		"arch-compat=%s, client_id_based = %d, num_clients=%d",
+		soc_private->arch_compat, soc_private->client_id_based,
+		soc_private->num_clients);
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-names", i, &soc_private->client_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
+			return -ENODEV;
+		}
+		CAM_DBG(CAM_CPAS, "Client[%d] : %s", i,
+			soc_private->client_name[i]);
+	}
+
+	count = of_property_count_strings(of_node, "client-axi-port-names");
+	if ((count <= 0) || (count != soc_private->num_clients)) {
+		CAM_ERR(CAM_CPAS, "incorrect client-axi-port-names info %d %d",
+			count, soc_private->num_clients);
+		count = 0;
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_private->num_clients; i++) {
+		rc = of_property_read_string_index(of_node,
+			"client-axi-port-names", i,
+			&soc_private->client_axi_port_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_CPAS, "no client-name at cnt=%d", i);
+			return -ENODEV;
+		}
+		CAM_DBG(CAM_CPAS, "Client AXI Port[%d] : %s", i,
+			soc_private->client_axi_port_name[i]);
+	}
+
+	soc_private->axi_camnoc_based = of_property_read_bool(of_node,
+		"client-bus-camnoc-based");
+
+	soc_private->control_camnoc_axi_clk = of_property_read_bool(of_node,
+		"control-camnoc-axi-clk");
+
+	if (soc_private->control_camnoc_axi_clk) {
+		rc = of_property_read_u32(of_node, "camnoc-bus-width",
+			&soc_private->camnoc_bus_width);
+		if (rc || (soc_private->camnoc_bus_width == 0)) {
+			CAM_ERR(CAM_CPAS, "Bus width not found rc=%d, %d",
+				rc, soc_private->camnoc_bus_width);
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node,
+			"camnoc-axi-clk-bw-margin-perc",
+			&soc_private->camnoc_axi_clk_bw_margin);
+
+		if (rc) {
+			/* this is not fatal, overwrite rc */
+			rc = 0;
+			soc_private->camnoc_axi_clk_bw_margin = 0;
+		}
+	}
+
+	CAM_DBG(CAM_CPAS,
+		"control_camnoc_axi_clk=%d, width=%d, margin=%d",
+		soc_private->control_camnoc_axi_clk,
+		soc_private->camnoc_bus_width,
+		soc_private->camnoc_axi_clk_bw_margin);
+
+	count = of_property_count_u32_elems(of_node, "vdd-corners");
+	if ((count > 0) && (count <= CAM_REGULATOR_LEVEL_MAX) &&
+		(of_property_count_strings(of_node, "vdd-corner-ahb-mapping") ==
+		count)) {
+		const char *ahb_string;
+
+		for (i = 0; i < count; i++) {
+			rc = of_property_read_u32_index(of_node, "vdd-corners",
+				i, &soc_private->vdd_ahb[i].vdd_corner);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"vdd-corners failed at index=%d", i);
+				return -ENODEV;
+			}
+
+			rc = of_property_read_string_index(of_node,
+				"vdd-corner-ahb-mapping", i, &ahb_string);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"no ahb-mapping at index=%d", i);
+				return -ENODEV;
+			}
+
+			rc = cam_soc_util_get_level_from_string(ahb_string,
+				&soc_private->vdd_ahb[i].ahb_level);
+			if (rc) {
+				CAM_ERR(CAM_CPAS,
+					"invalid ahb-string at index=%d", i);
+				return -EINVAL;
+			}
+
+			CAM_DBG(CAM_CPAS,
+				"Vdd-AHB mapping [%d] : [%d] [%s] [%d]", i,
+				soc_private->vdd_ahb[i].vdd_corner,
+				ahb_string, soc_private->vdd_ahb[i].ahb_level);
+		}
+
+		soc_private->num_vdd_ahb_mapping = count;
+	}
+
+	return 0;
+}
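+
+/*
+ * Illustrative DT fragment for the properties parsed above (a sketch;
+ * the client and AXI port names are hypothetical):
+ *
+ *	qcom,cam-cpas {
+ *		compatible = "qcom,cam-cpas";
+ *		arch-compat = "cpas_top";
+ *		client-names = "csiphy0", "ife0";
+ *		client-axi-port-names = "cam_hf_1", "cam_hf_1";
+ *		client-bus-camnoc-based;
+ *		control-camnoc-axi-clk;
+ *		camnoc-bus-width = <32>;
+ *	};
+ */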
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	if (soc_info->irq_line && !irq_handler) {
+		CAM_ERR(CAM_CPAS, "Invalid IRQ handler");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		irq_data);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in request_platform_resource, rc=%d",
+			rc);
+		return rc;
+	}
+
+	soc_info->soc_private = kzalloc(sizeof(struct cam_cpas_private_soc),
+		GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+
+	rc = cam_cpas_get_custom_dt_info(soc_info->pdev, soc_info->soc_private);
+	if (rc) {
+		CAM_ERR(CAM_CPAS, "failed in get_custom_info, rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
+
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
+
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		default_level, true);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "enable platform resource failed, rc=%d", rc);
+
+	return rc;
+}
+
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info,
+		disable_clocks, disable_irq);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "disable platform failed, rc=%d", rc);
+
+	return rc;
+}
+
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_irq_disable(soc_info);
+	if (rc)
+		CAM_ERR(CAM_CPAS, "disable irq failed, rc=%d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
new file mode 100644
index 0000000..a9d6e1f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cam_cpas_soc.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_SOC_H_
+#define _CAM_CPAS_SOC_H_
+
+#include "cam_soc_util.h"
+#include "cam_cpas_hw.h"
+
+#define CAM_REGULATOR_LEVEL_MAX 16
+
+/**
+ * struct cam_cpas_vdd_ahb_mapping : Voltage to ahb level mapping
+ *
+ * @vdd_corner : Voltage corner value
+ * @ahb_level : AHB vote level corresponds to this vdd_corner
+ *
+ */
+struct cam_cpas_vdd_ahb_mapping {
+	unsigned int vdd_corner;
+	enum cam_vote_level ahb_level;
+};
+
+/**
+ * struct cam_cpas_private_soc : CPAS private DT info
+ *
+ * @arch_compat: ARCH compatible string
+ * @client_id_based: Whether clients are id based
+ * @num_clients: Number of clients supported
+ * @client_name: Client names
+ * @axi_camnoc_based: Whether AXI access is camnoc based
+ * @client_axi_port_name: AXI Port name for each client
+ * @axi_port_list_node : Node representing AXI Ports list
+ * @num_vdd_ahb_mapping : Number of vdd to ahb level mapping supported
+ * @vdd_ahb : AHB level mapping info for the supported vdd levels
+ * @control_camnoc_axi_clk : Whether CPAS driver needs to set camnoc axi clk freq
+ * @camnoc_bus_width : CAMNOC Bus width
+ * @camnoc_axi_clk_bw_margin : BW Margin in percentage to add while calculating
+ *      camnoc axi clock
+ * @camnoc_axi_min_ib_bw: Min camnoc BW which varies based on target
+ *
+ */
+struct cam_cpas_private_soc {
+	const char *arch_compat;
+	bool client_id_based;
+	uint32_t num_clients;
+	const char *client_name[CAM_CPAS_MAX_CLIENTS];
+	bool axi_camnoc_based;
+	const char *client_axi_port_name[CAM_CPAS_MAX_CLIENTS];
+	struct device_node *axi_port_list_node;
+	uint32_t num_vdd_ahb_mapping;
+	struct cam_cpas_vdd_ahb_mapping vdd_ahb[CAM_REGULATOR_LEVEL_MAX];
+	bool control_camnoc_axi_clk;
+	uint32_t camnoc_bus_width;
+	uint32_t camnoc_axi_clk_bw_margin;
+	uint64_t camnoc_axi_min_ib_bw;
+};
+
+int cam_cpas_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *irq_data);
+int cam_cpas_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_cpas_soc_enable_resources(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level default_level);
+int cam_cpas_soc_disable_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
+int cam_cpas_soc_disable_irq(struct cam_hw_soc_info *soc_info);
+#endif /* _CAM_CPAS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
new file mode 100644
index 0000000..92025eb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_camsstop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
new file mode 100644
index 0000000..29427ff
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/camss_top/cam_camsstop_hw.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpas_soc.h"
+
+int cam_camsstop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CAMSS];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CAMERA_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->camera_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->camera_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	CAM_DBG(CAM_CPAS, "Family %d, version %d.%d.%d",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr);
+
+	return 0;
+}
+
+int cam_camsstop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camss", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMSS] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CAM_CPAS_REG_CAMSS");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_camsstop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_camsstop_get_hw_info;
+	internal_ops->init_hw_version = NULL;
+	internal_ops->handle_irq = NULL;
+	internal_ops->setup_regbase = cam_camsstop_setup_regbase_indices;
+	internal_ops->power_on = NULL;
+	internal_ops->power_off = NULL;
+
+	return 0;
+}
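Since camss_top leaves the IRQ, version and power hooks NULL, users of the
ops table must treat every hook as optional. A hypothetical dispatch sketch
(the wrapper is invented; power_on is assumed to take the cam_hw_info
pointer, matching the cpas_top implementation later in this patch):

static int example_power_on_if_present(struct cam_hw_info *cpas_hw,
	struct cam_cpas_internal_ops *ops)
{
	/* camss_top populates only get_hw_info and setup_regbase */
	if (ops->power_on)
		return ops->power_on(cpas_hw);

	return 0;
}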
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
new file mode 100644
index 0000000..4aa2050
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cpastop_hw.o
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
new file mode 100644
index 0000000..4103769
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -0,0 +1,613 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include "cam_cpas_hw_intf.h"
+#include "cam_cpas_hw.h"
+#include "cam_cpastop_hw.h"
+#include "cam_io_util.h"
+#include "cam_cpas_soc.h"
+#include "cpastop100.h"
+#include "cpastop_v150_100.h"
+#include "cpastop_v170_110.h"
+#include "cpastop_v175_100.h"
+#include "cpastop_v175_101.h"
+
+struct cam_camnoc_info *camnoc_info;
+
+#define CAMNOC_SLAVE_MAX_ERR_CODE 7
+static const char * const camnoc_slave_err_code[] = {
+	"Target Error",              /* err code 0 */
+	"Address decode error",      /* err code 1 */
+	"Unsupported request",       /* err code 2 */
+	"Disconnected target",       /* err code 3 */
+	"Security violation",        /* err code 4 */
+	"Hidden security violation", /* err code 5 */
+	"Timeout Error",             /* err code 6 */
+	"Unknown Error",             /* unknown err code */
+};
+
+static int cam_cpastop_get_hw_info(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int32_t reg_indx = cpas_core->regbase_index[CAM_CPAS_REG_CPASTOP];
+	uint32_t reg_value;
+
+	if (reg_indx == -1)
+		return -EINVAL;
+
+	hw_caps->camera_family = CAM_FAMILY_CPAS_SS;
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x0);
+	hw_caps->camera_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff0000, 0x10);
+	hw_caps->camera_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff00, 0x8);
+	hw_caps->camera_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x4);
+	hw_caps->cpas_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->cpas_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->cpas_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[reg_indx].mem_base + 0x8);
+	hw_caps->camera_capability = reg_value;
+
+	CAM_DBG(CAM_FD, "Family %d, version %d.%d.%d, cpas %d.%d.%d, cap 0x%x",
+		hw_caps->camera_family, hw_caps->camera_version.major,
+		hw_caps->camera_version.minor, hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major, hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr, hw_caps->camera_capability);
+
+	soc_info->hw_version = CAM_CPAS_TITAN_NONE;
+
+	if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V100;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 1) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V110;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 2) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_170_V120;
+	} else if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 7) &&
+		(hw_caps->camera_version.incr == 5)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V100;
+		else if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 1))
+			soc_info->hw_version = CAM_CPAS_TITAN_175_V101;
+	} else if ((hw_caps->camera_version.major == 1) &&
+		(hw_caps->camera_version.minor == 5) &&
+		(hw_caps->camera_version.incr == 0)) {
+		if ((hw_caps->cpas_version.major == 1) &&
+			(hw_caps->cpas_version.minor == 0) &&
+			(hw_caps->cpas_version.incr == 0))
+			soc_info->hw_version = CAM_CPAS_TITAN_150_V100;
+	}
+
+	CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_info->hw_version);
+
+	return 0;
+}
+
+static int cam_cpastop_setup_regbase_indices(struct cam_hw_soc_info *soc_info,
+	int32_t regbase_index[], int32_t num_reg_map)
+{
+	uint32_t index;
+	int rc;
+
+	if (num_reg_map > CAM_CPAS_REG_MAX) {
+		CAM_ERR(CAM_CPAS, "invalid num_reg_map=%d", num_reg_map);
+		return -EINVAL;
+	}
+
+	if (soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) {
+		CAM_ERR(CAM_CPAS, "invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_cpas_top", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CPASTOP] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CPASTOP, rc=%d, %d %d",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "cam_camnoc", &index);
+	if ((rc == 0) && (index < num_reg_map)) {
+		regbase_index[CAM_CPAS_REG_CAMNOC] = index;
+	} else {
+		CAM_ERR(CAM_CPAS, "regbase not found for CAMNOC, rc=%d, %d %d",
+			rc, index, num_reg_map);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_handle_errlogger(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_camnoc_irq_slave_err_data *slave_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int err_code_index = 0;
+
+	if (!camnoc_info->err_logger) {
+		CAM_ERR_RATE_LIMIT(CAM_CPAS, "Invalid err logger info");
+		return -EINVAL;
+	}
+
+	slave_err->mainctrl.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->mainctrl);
+
+	slave_err->errvld.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errvld);
+
+	slave_err->errlog0_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_low);
+
+	slave_err->errlog0_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog0_high);
+
+	slave_err->errlog1_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_low);
+
+	slave_err->errlog1_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog1_high);
+
+	slave_err->errlog2_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_low);
+
+	slave_err->errlog2_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog2_high);
+
+	slave_err->errlog3_low.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_low);
+
+	slave_err->errlog3_high.value = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->err_logger->errlog3_high);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"Possible memory configuration issue, fault at SMMU raised as CAMNOC SLAVE_IRQ");
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"mainctrl[0x%x 0x%x] errvld[0x%x 0x%x] stall_en=%d, fault_en=%d, err_vld=%d",
+		camnoc_info->err_logger->mainctrl,
+		slave_err->mainctrl.value,
+		camnoc_info->err_logger->errvld,
+		slave_err->errvld.value,
+		slave_err->mainctrl.stall_en,
+		slave_err->mainctrl.fault_en,
+		slave_err->errvld.err_vld);
+
+	err_code_index = slave_err->errlog0_low.err_code;
+	if (err_code_index > CAMNOC_SLAVE_MAX_ERR_CODE)
+		err_code_index = CAMNOC_SLAVE_MAX_ERR_CODE;
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog0 low[0x%x 0x%x] high[0x%x 0x%x] loginfo_vld=%d, word_error=%d, non_secure=%d, device=%d, opc=%d, err_code=%d(%s) sizef=%d, addr_space=%d, len1=%d",
+		camnoc_info->err_logger->errlog0_low,
+		slave_err->errlog0_low.value,
+		camnoc_info->err_logger->errlog0_high,
+		slave_err->errlog0_high.value,
+		slave_err->errlog0_low.loginfo_vld,
+		slave_err->errlog0_low.word_error,
+		slave_err->errlog0_low.non_secure,
+		slave_err->errlog0_low.device,
+		slave_err->errlog0_low.opc,
+		slave_err->errlog0_low.err_code,
+		camnoc_slave_err_code[err_code_index],
+		slave_err->errlog0_low.sizef,
+		slave_err->errlog0_low.addr_space,
+		slave_err->errlog0_high.len1);
+
+	CAM_ERR_RATE_LIMIT(CAM_CPAS,
+		"errlog1_low[0x%x 0x%x]  errlog1_high[0x%x 0x%x] errlog2_low[0x%x 0x%x]  errlog2_high[0x%x 0x%x] errlog3_low[0x%x 0x%x]  errlog3_high[0x%x 0x%x]",
+		camnoc_info->err_logger->errlog1_low,
+		slave_err->errlog1_low.value,
+		camnoc_info->err_logger->errlog1_high,
+		slave_err->errlog1_high.value,
+		camnoc_info->err_logger->errlog2_low,
+		slave_err->errlog2_low.value,
+		camnoc_info->err_logger->errlog2_high,
+		slave_err->errlog2_high.value,
+		camnoc_info->err_logger->errlog3_low,
+		slave_err->errlog3_low.value,
+		camnoc_info->err_logger->errlog3_high,
+		slave_err->errlog3_high.value);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ubwc_enc_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_enc_data *enc_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	enc_err->encerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc enc err [%d]: offset[0x%x] value[0x%x]",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		enc_err->encerr_status.value);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ubwc_dec_err(struct cam_cpas *cpas_core,
+	struct cam_hw_soc_info *soc_info, int i,
+	struct cam_camnoc_irq_ubwc_dec_data *dec_err)
+{
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+
+	dec_err->decerr_status.value =
+		cam_io_r_mb(soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_err[i].err_status.offset);
+
+	/* Let clients handle the UBWC errors */
+	CAM_DBG(CAM_CPAS,
+		"ubwc dec err status [%d]: offset[0x%x] value[0x%x] thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+		i, camnoc_info->irq_err[i].err_status.offset,
+		dec_err->decerr_status.value,
+		dec_err->decerr_status.thr_err,
+		dec_err->decerr_status.fcl_err,
+		dec_err->decerr_status.len_md_err,
+		dec_err->decerr_status.format_err);
+
+	return 0;
+}
+
+static int cam_cpastop_handle_ahb_timeout_err(struct cam_hw_info *cpas_hw,
+	struct cam_camnoc_irq_ahb_timeout_data *ahb_err)
+{
+	CAM_ERR_RATE_LIMIT(CAM_CPAS, "ahb timeout error");
+
+	return 0;
+}
+
+static int cam_cpastop_disable_test_irq(struct cam_hw_info *cpas_hw)
+{
+	camnoc_info->irq_sbm->sbm_clear.value &= ~0x4;
+	camnoc_info->irq_sbm->sbm_enable.value &= ~0x100;
+	camnoc_info->irq_err[CAM_CAMNOC_HW_IRQ_CAMNOC_TEST].enable = false;
+
+	return 0;
+}
+
+static int cam_cpastop_reset_irq(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_clear);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_clear);
+	}
+
+	cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+		&camnoc_info->irq_sbm->sbm_enable);
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if (camnoc_info->irq_err[i].enable)
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->irq_err[i].err_enable);
+	}
+
+	return 0;
+}
+
+static void cam_cpastop_notify_clients(struct cam_cpas *cpas_core,
+	struct cam_cpas_irq_data *irq_data)
+{
+	int i;
+	struct cam_cpas_client *cpas_client;
+	bool error_handled = false;
+
+	CAM_DBG(CAM_CPAS,
+		"Notify CB : num_clients=%d, registered=%d, started=%d",
+		cpas_core->num_clients, cpas_core->registered_clients,
+		cpas_core->streamon_clients);
+
+	for (i = 0; i < cpas_core->num_clients; i++) {
+		if (CAM_CPAS_CLIENT_STARTED(cpas_core, i)) {
+			cpas_client = cpas_core->cpas_client[i];
+			if (cpas_client->data.cam_cpas_client_cb) {
+				CAM_DBG(CAM_CPAS,
+					"Calling client CB %d : %d",
+					i, irq_data->irq_type);
+				error_handled =
+					cpas_client->data.cam_cpas_client_cb(
+					cpas_client->data.client_handle,
+					cpas_client->data.userdata,
+					irq_data);
+				if (error_handled)
+					break;
+			}
+		}
+	}
+}
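+
+/*
+ * Client callback sketch (illustrative; the signature is inferred from the
+ * invocation above: client_handle, userdata and irq_data in, a bool
+ * "error handled" out):
+ *
+ *	static bool example_client_cb(uint32_t client_handle, void *userdata,
+ *		struct cam_cpas_irq_data *irq_data)
+ *	{
+ *		if (irq_data->irq_type ==
+ *			CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR)
+ *			return true;
+ *
+ *		return false;
+ *	}
+ *
+ * Returning true consumes the event and breaks out of the loop above;
+ * returning false lets the remaining started clients see it, so at most
+ * one client handles a given error.
+ */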
+
+static void cam_cpastop_work(struct work_struct *work)
+{
+	struct cam_cpas_work_payload *payload;
+	struct cam_hw_info *cpas_hw;
+	struct cam_cpas *cpas_core;
+	struct cam_hw_soc_info *soc_info;
+	int i;
+	enum cam_camnoc_hw_irq_type irq_type;
+	struct cam_cpas_irq_data irq_data;
+
+	payload = container_of(work, struct cam_cpas_work_payload, work);
+	if (!payload) {
+		CAM_ERR(CAM_CPAS, "NULL payload");
+		return;
+	}
+
+	cpas_hw = payload->hw;
+	cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	soc_info = &cpas_hw->soc_info;
+
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		kfree(payload);
+		return;
+	}
+
+	for (i = 0; i < camnoc_info->irq_err_size; i++) {
+		if ((payload->irq_status & camnoc_info->irq_err[i].sbm_port) &&
+			(camnoc_info->irq_err[i].enable)) {
+			irq_type = camnoc_info->irq_err[i].irq_type;
+			CAM_ERR_RATE_LIMIT(CAM_CPAS,
+				"Error occurred, type=%d", irq_type);
+			memset(&irq_data, 0x0, sizeof(irq_data));
+			irq_data.irq_type = (enum cam_camnoc_irq_type)irq_type;
+
+			switch (irq_type) {
+			case CAM_CAMNOC_HW_IRQ_SLAVE_ERROR:
+				cam_cpastop_handle_errlogger(
+					cpas_core, soc_info,
+					&irq_data.u.slave_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR:
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+				cam_cpastop_handle_ubwc_enc_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.enc_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+				cam_cpastop_handle_ubwc_dec_err(
+					cpas_core, soc_info, i,
+					&irq_data.u.dec_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT:
+				cam_cpastop_handle_ahb_timeout_err(
+					cpas_hw, &irq_data.u.ahb_err);
+				break;
+			case CAM_CAMNOC_HW_IRQ_CAMNOC_TEST:
+				CAM_DBG(CAM_CPAS, "TEST IRQ");
+				break;
+			default:
+				CAM_ERR(CAM_CPAS, "Invalid IRQ type");
+				break;
+			}
+
+			cam_cpastop_notify_clients(cpas_core, &irq_data);
+
+			payload->irq_status &=
+				~camnoc_info->irq_err[i].sbm_port;
+		}
+	}
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
+	CAM_DBG(CAM_CPAS, "irq_count=%d\n", atomic_read(&cpas_core->irq_count));
+
+	if (payload->irq_status)
+		CAM_ERR(CAM_CPAS, "IRQ not handled irq_status=0x%x",
+			payload->irq_status);
+
+	kfree(payload);
+}
+
+static irqreturn_t cam_cpastop_handle_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *cpas_hw = (struct cam_hw_info *)data;
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	struct cam_cpas_work_payload *payload;
+
+	if (!atomic_inc_not_zero(&cpas_core->irq_count)) {
+		CAM_ERR(CAM_CPAS, "CPAS off");
+		return IRQ_HANDLED;
+	}
+
+	payload = kzalloc(sizeof(struct cam_cpas_work_payload), GFP_ATOMIC);
+	if (!payload)
+		goto done;
+
+	payload->irq_status = cam_io_r_mb(
+		soc_info->reg_map[camnoc_index].mem_base +
+		camnoc_info->irq_sbm->sbm_status.offset);
+
+	CAM_DBG(CAM_CPAS, "IRQ callback, irq_status=0x%x", payload->irq_status);
+
+	payload->hw = cpas_hw;
+	INIT_WORK((struct work_struct *)&payload->work, cam_cpastop_work);
+
+	if (TEST_IRQ_ENABLE)
+		cam_cpastop_disable_test_irq(cpas_hw);
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	queue_work(cpas_core->work_queue, &payload->work);
+done:
+	atomic_dec(&cpas_core->irq_count);
+	wake_up(&cpas_core->irq_count_wq);
+
+	return IRQ_HANDLED;
+}
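+
+/*
+ * Note on the irq_count handshake (an interpretation of the code above):
+ * both the hardirq path and the worker take a reference with
+ * atomic_inc_not_zero(), drop it when done and wake irq_count_wq. A stop
+ * path can therefore quiesce IRQ processing with something like:
+ *
+ *	atomic_dec(&cpas_core->irq_count);
+ *	wait_event(cpas_core->irq_count_wq,
+ *		atomic_read(&cpas_core->irq_count) == 0);
+ *
+ * (hypothetical sketch; the actual stop sequence lives in the cpas hw
+ * layer, outside this file)
+ */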
+
+static int cam_cpastop_poweron(struct cam_hw_info *cpas_hw)
+{
+	int i;
+
+	cam_cpastop_reset_irq(cpas_hw);
+
+	for (i = 0; i < camnoc_info->specific_size; i++) {
+		if (camnoc_info->specific[i].enable) {
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_low);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].priority_lut_high);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].urgency);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].danger_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].safe_lut);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].ubwc_ctl);
+			cam_cpas_util_reg_update(cpas_hw, CAM_CPAS_REG_CAMNOC,
+				&camnoc_info->specific[i].flag_out_set0_low);
+		}
+	}
+
+	return 0;
+}
+
+static int cam_cpastop_poweroff(struct cam_hw_info *cpas_hw)
+{
+	struct cam_cpas *cpas_core = (struct cam_cpas *) cpas_hw->core_info;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+	int camnoc_index = cpas_core->regbase_index[CAM_CPAS_REG_CAMNOC];
+	int rc = 0;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list =
+		camnoc_info->errata_wa_list;
+
+	if (!errata_wa_list)
+		return 0;
+
+	if (errata_wa_list->camnoc_flush_slave_pending_trans.enable) {
+		struct cam_cpas_hw_errata_wa *errata_wa =
+			&errata_wa_list->camnoc_flush_slave_pending_trans;
+
+		rc = cam_io_poll_value_wmask(
+			soc_info->reg_map[camnoc_index].mem_base +
+			errata_wa->data.reg_info.offset,
+			errata_wa->data.reg_info.value,
+			errata_wa->data.reg_info.mask,
+			CAM_CPAS_POLL_RETRY_CNT,
+			CAM_CPAS_POLL_MIN_USECS, CAM_CPAS_POLL_MAX_USECS);
+		if (rc) {
+			CAM_DBG(CAM_CPAS,
+				"camnoc flush slave pending trans failed");
+			/* Do not return error, passthrough */
+			rc = 0;
+		}
+	}
+
+	return rc;
+}
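+
+/*
+ * Assumed semantics of the errata poll above (a sketch, not the real
+ * cam_io_poll_value_wmask() body): retry up to CAM_CPAS_POLL_RETRY_CNT
+ * times until the masked read matches the expected value, roughly
+ *
+ *	while (retries--) {
+ *		if ((cam_io_r_mb(addr) & mask) == value)
+ *			return 0;
+ *		usleep_range(min_usecs, max_usecs);
+ *	}
+ *	return -ETIMEDOUT;
+ *
+ * i.e. wait for the SidebandManager SenseIn0 bits (17-19) to clear before
+ * the CPAS_TOP gdsc is turned off.
+ */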
+
+static int cam_cpastop_init_hw_version(struct cam_hw_info *cpas_hw,
+	struct cam_cpas_hw_caps *hw_caps)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &cpas_hw->soc_info;
+
+	CAM_DBG(CAM_CPAS,
+		"hw_version=0x%x Camera Version %d.%d.%d, cpas version %d.%d.%d",
+		soc_info->hw_version,
+		hw_caps->camera_version.major,
+		hw_caps->camera_version.minor,
+		hw_caps->camera_version.incr,
+		hw_caps->cpas_version.major,
+		hw_caps->cpas_version.minor,
+		hw_caps->cpas_version.incr);
+
+	switch (soc_info->hw_version) {
+	case CAM_CPAS_TITAN_170_V100:
+		camnoc_info = &cam170_cpas100_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_170_V110:
+		camnoc_info = &cam170_cpas110_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V100:
+		camnoc_info = &cam175_cpas100_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_175_V101:
+		camnoc_info = &cam175_cpas101_camnoc_info;
+		break;
+	case CAM_CPAS_TITAN_150_V100:
+		camnoc_info = &cam150_cpas100_camnoc_info;
+		break;
+	default:
+		CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
+			hw_caps->camera_version.major,
+			hw_caps->camera_version.minor,
+			hw_caps->camera_version.incr);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+int cam_cpastop_get_internal_ops(struct cam_cpas_internal_ops *internal_ops)
+{
+	if (!internal_ops) {
+		CAM_ERR(CAM_CPAS, "invalid NULL param");
+		return -EINVAL;
+	}
+
+	internal_ops->get_hw_info = cam_cpastop_get_hw_info;
+	internal_ops->init_hw_version = cam_cpastop_init_hw_version;
+	internal_ops->handle_irq = cam_cpastop_handle_irq;
+	internal_ops->setup_regbase = cam_cpastop_setup_regbase_indices;
+	internal_ops->power_on = cam_cpastop_poweron;
+	internal_ops->power_off = cam_cpastop_poweroff;
+
+	return 0;
+}
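Supporting a new CPAS revision follows the same pattern as the five cases
above: add a cpastop_v<camera>_<cpas>.h header with its camnoc tables,
include it at the top of this file, and extend the switch in
cam_cpastop_init_hw_version(). A hypothetical sketch (both the version id
and the info symbol are invented):

	case CAM_CPAS_TITAN_180_V100:
		camnoc_info = &cam180_cpas100_camnoc_info;
		break;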
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
new file mode 100644
index 0000000..ca2ff06
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPASTOP_HW_H_
+#define _CAM_CPASTOP_HW_H_
+
+#include "cam_cpas_api.h"
+#include "cam_cpas_hw.h"
+
+/**
+ * enum cam_camnoc_hw_irq_type - Enum for camnoc error types
+ *
+ * @CAM_CAMNOC_HW_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                                 1 QHB port) has an error logger. The error
+ *                                 observed at any slave port is logged into
+ *                                 the error logger register and an IRQ is
+ *                                 triggered
+ * @CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error is
+ *                                               detected in the IFE0 or IFE2
+ *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error is
+ *                                               detected in the IFE1 or IFE3
+ *                                               UBWC encoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error is
+ *                                               detected in the IPE/BPS
+ *                                               UBWC decoder instance
+ * @CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error is
+ *                                               detected in the IPE/BPS UBWC
+ *                                               encoder instance
+ * @CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP
+ *                                               slave times out after 4000
+ *                                               AHB cycles
+ * @CAM_CAMNOC_HW_IRQ_RESERVED1                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_RESERVED2                : Reserved
+ * @CAM_CAMNOC_HW_IRQ_CAMNOC_TEST              : To test the IRQ logic
+ */
+enum cam_camnoc_hw_irq_type {
+	CAM_CAMNOC_HW_IRQ_SLAVE_ERROR =
+		CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR =
+		CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT =
+		CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+	CAM_CAMNOC_HW_IRQ_RESERVED1,
+	CAM_CAMNOC_HW_IRQ_RESERVED2,
+	CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+};
+
+/**
+ * enum cam_camnoc_port_type - Enum for different camnoc hw ports. All CAMNOC
+ *         settings, such as QoS and LUT mappings, need to be configured
+ *         for each of these ports.
+ *
+ * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
+ * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
+ * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_READ: Indicates IPE, BPS, LRME Read HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_IPE_BPS_LRME_WRITE: Indicates IPE, BPS, LRME Write HW
+ *         connection to camnoc
+ * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
+ * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
+ * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
+ */
+enum cam_camnoc_port_type {
+	CAM_CAMNOC_CDM,
+	CAM_CAMNOC_IFE02,
+	CAM_CAMNOC_IFE13,
+	CAM_CAMNOC_IPE_BPS_LRME_READ,
+	CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+	CAM_CAMNOC_JPEG,
+	CAM_CAMNOC_FD,
+	CAM_CAMNOC_ICP,
+};
+
+/**
+ * struct cam_camnoc_specific : CPAS camnoc specific settings
+ *
+ * @port_type: Port type
+ * @enable: Whether to enable settings for this connection
+ * @priority_lut_low: Priority Low LUT mapping for this connection
+ * @priority_lut_high: Priority High LUT mapping for this connection
+ * @urgency: Urgency (QoS) settings for this connection
+ * @danger_lut: Danger LUT mapping for this connection
+ * @safe_lut: Safe LUT mapping for this connection
+ * @ubwc_ctl: UBWC control settings for this connection
+ * @flag_out_set0_low: Flag-out set0 (low) settings for this connection
+ *
+ */
+struct cam_camnoc_specific {
+	enum cam_camnoc_port_type port_type;
+	bool enable;
+	struct cam_cpas_reg priority_lut_low;
+	struct cam_cpas_reg priority_lut_high;
+	struct cam_cpas_reg urgency;
+	struct cam_cpas_reg danger_lut;
+	struct cam_cpas_reg safe_lut;
+	struct cam_cpas_reg ubwc_ctl;
+	struct cam_cpas_reg flag_out_set0_low;
+};
+
+/**
+ * struct cam_camnoc_irq_sbm : Sideband manager settings for all CAMNOC IRQs
+ *
+ * @sbm_enable: SBM settings for IRQ enable
+ * @sbm_status: SBM settings for IRQ status
+ * @sbm_clear: SBM settings for IRQ clear
+ *
+ */
+struct cam_camnoc_irq_sbm {
+	struct cam_cpas_reg sbm_enable;
+	struct cam_cpas_reg sbm_status;
+	struct cam_cpas_reg sbm_clear;
+};
+
+/**
+ * struct cam_camnoc_irq_err : Error settings specific to each CAMNOC IRQ
+ *
+ * @irq_type: Type of IRQ
+ * @enable: Whether to enable error settings for this IRQ
+ * @sbm_port: Corresponding SBM port for this IRQ
+ * @err_enable: Error enable settings for this IRQ
+ * @err_status: Error status settings for this IRQ
+ * @err_clear: Error clear settings for this IRQ
+ *
+ */
+struct cam_camnoc_irq_err {
+	enum cam_camnoc_hw_irq_type irq_type;
+	bool enable;
+	uint32_t sbm_port;
+	struct cam_cpas_reg err_enable;
+	struct cam_cpas_reg err_status;
+	struct cam_cpas_reg err_clear;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa : Struct for HW errata workaround info
+ *
+ * @enable: Whether to enable this errata workaround
+ * @data: HW Errata workaround data
+ *
+ */
+struct cam_cpas_hw_errata_wa {
+	bool enable;
+	union {
+		struct cam_cpas_reg reg_info;
+	} data;
+};
+
+/**
+ * struct cam_cpas_hw_errata_wa_list : List of HW Errata workaround info
+ *
+ * @camnoc_flush_slave_pending_trans: Errata workaround info for flushing
+ *         camnoc slave pending transactions before turning off CPAS_TOP gdsc
+ *
+ */
+struct cam_cpas_hw_errata_wa_list {
+	struct cam_cpas_hw_errata_wa camnoc_flush_slave_pending_trans;
+};
+
+/**
+ * struct cam_camnoc_err_logger_info : CAMNOC error logger register offsets
+ *
+ * @mainctrl: Register offset for mainctrl
+ * @errvld: Register offset for errvld
+ * @errlog0_low: Register offset for errlog0_low
+ * @errlog0_high: Register offset for errlog0_high
+ * @errlog1_low: Register offset for errlog1_low
+ * @errlog1_high: Register offset for errlog1_high
+ * @errlog2_low: Register offset for errlog2_low
+ * @errlog2_high: Register offset for errlog2_high
+ * @errlog3_low: Register offset for errlog3_low
+ * @errlog3_high: Register offset for errlog3_high
+ *
+ */
+struct cam_camnoc_err_logger_info {
+	uint32_t mainctrl;
+	uint32_t errvld;
+	uint32_t errlog0_low;
+	uint32_t errlog0_high;
+	uint32_t errlog1_low;
+	uint32_t errlog1_high;
+	uint32_t errlog2_low;
+	uint32_t errlog2_high;
+	uint32_t errlog3_low;
+	uint32_t errlog3_high;
+};
+
+/**
+ * struct cam_camnoc_info : Overall CAMNOC settings info
+ *
+ * @specific: Pointer to CAMNOC specific settings
+ * @specific_size: Array size of CAMNOC specific settings
+ * @irq_sbm: Pointer to CAMNOC IRQ SBM settings
+ * @irq_err: Pointer to CAMNOC IRQ Error settings
+ * @irq_err_size: Array size of IRQ Error settings
+ * @err_logger: Pointer to CAMNOC IRQ Error logger read registers
+ * @errata_wa_list: HW Errata workaround info
+ *
+ */
+struct cam_camnoc_info {
+	struct cam_camnoc_specific *specific;
+	int specific_size;
+	struct cam_camnoc_irq_sbm *irq_sbm;
+	struct cam_camnoc_irq_err *irq_err;
+	int irq_err_size;
+	struct cam_camnoc_err_logger_info *err_logger;
+	struct cam_cpas_hw_errata_wa_list *errata_wa_list;
+};
+
+/**
+ * struct cam_cpas_work_payload : Struct for cpas work payload data
+ *
+ * @hw: Pointer to HW info
+ * @irq_status: IRQ status value
+ * @irq_data: IRQ data
+ * @work: Work handle
+ *
+ */
+struct cam_cpas_work_payload {
+	struct cam_hw_info *hw;
+	uint32_t irq_status;
+	uint32_t irq_data;
+	struct work_struct work;
+};
+
+#endif /* _CAM_CPASTOP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
new file mode 100644
index 0000000..84bb9e2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop100.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP100_H_
+#define _CPASTOP100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x3,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x3,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = false,
+	}
+};
+
+static struct cam_camnoc_err_logger_info cam170_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam170_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = true,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+struct cam_camnoc_info cam170_cpas100_camnoc_info = {
+	.specific = &cam_cpas100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas100_camnoc_specific) /
+		sizeof(cam_cpas100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas100_irq_sbm,
+	.irq_err = &cam_cpas100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas100_irq_err) /
+		sizeof(cam_cpas100_irq_err[0]),
+	.err_logger = &cam170_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam170_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP100_H_ */
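As a quick check on the sbm_enable composition used throughout these per-SoC
tables: 0x1 | 0x2 | 0x4 | 0x8 | 0x10 | 0x20 = 0x3f, i.e. fault-in ports 0
through 5, with bit 8 (0x100) ORed in only when TEST_IRQ_ENABLE is set; the
same single-bit values reappear as the sbm_port masks in the irq_err arrays.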
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v150_100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v150_100.h
new file mode 100644
index 0000000..8ba4cd8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v150_100.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V150_100_H_
+#define _CPASTOP_V150_100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v150_100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v150_100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v150_100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 0x5,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam150_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam150_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam150_cpas100_camnoc_info = {
+	.specific = &cam_cpas_v150_100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas_v150_100_camnoc_specific) /
+		sizeof(cam_cpas_v150_100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas_v150_100_irq_sbm,
+	.irq_err = &cam_cpas_v150_100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas_v150_100_irq_err) /
+		sizeof(cam_cpas_v150_100_irq_err[0]),
+	.err_logger = &cam150_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam150_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V150_100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
new file mode 100644
index 0000000..c7d700e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v170_110.h
@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V170_110_H_
+#define _CPASTOP_V170_110_H_
+
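+/* Set to 1 to exercise the CAMNOC test IRQ path (SBM port 8) */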
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas110_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas110_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas110_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam170_cpas110_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam170_cpas110_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam170_cpas110_camnoc_info = {
+	.specific = &cam_cpas110_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas110_camnoc_specific) /
+		sizeof(cam_cpas110_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas110_irq_sbm,
+	.irq_err = &cam_cpas110_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas110_irq_err) /
+		sizeof(cam_cpas110_irq_err[0]),
+	.err_logger = &cam170_cpas110_err_logger_offsets,
+	.errata_wa_list = &cam170_cpas110_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V170_110_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_100.h
new file mode 100644
index 0000000..6738bdf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_100.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V175_100_H_
+#define _CPASTOP_V175_100_H_
+
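+/* Set to 1 to exercise the CAMNOC test IRQ path (SBM port 8) */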
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v175_100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v175_100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam175_cpas100_camnoc_info = {
+	.specific = &cam_cpas_v175_100_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas_v175_100_camnoc_specific) /
+		sizeof(cam_cpas_v175_100_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas_v175_100_irq_sbm,
+	.irq_err = &cam_cpas_v175_100_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas_v175_100_irq_err) /
+		sizeof(cam_cpas_v175_100_irq_err[0]),
+	.err_logger = &cam175_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam175_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_101.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_101.h
new file mode 100644
index 0000000..b00678a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v175_101.h
@@ -0,0 +1,558 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V175_101_H_
+#define _CPASTOP_V175_101_H_
+
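+/* Set to 1 to exercise the CAMNOC test IRQ path (SBM port 8) */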
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v175_101_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = true,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v175_101_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* SPECIFIC_IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* SPECIFIC_IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* SPECIFIC_IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* SPECIFIC_IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* SPECIFIC_IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* SPECIFIC_IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* SPECIFIC_IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* SPECIFIC_IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* SPECIFIC_IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* SPECIFIC_IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190,
+			/* SPECIFIC_IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* SPECIFIC_IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v175_101_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* SPECIFIC_CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* SPECIFIC_CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* SPECIFIC_CDM_URGENCY_LOW */
+			.mask = 0x7, /* SPECIFIC_CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* SPECIFIC_CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* SPECIFIC_CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* SPECIFIC_CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE02,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x430, /* SPECIFIC_IFE02_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x434, /* SPECIFIC_IFE02_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x438, /* SPECIFIC_IFE02_URGENCY_LOW */
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE02_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x440, /* SPECIFIC_IFE02_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x448, /* SPECIFIC_IFE02_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x588, /* SPECIFIC_IFE02_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE13,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x830, /* SPECIFIC_IFE13_PRIORITYLUT_LOW */
+			.value = 0x66666543,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x834, /* SPECIFIC_IFE13_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x838, /* SPECIFIC_IFE13_URGENCY_LOW */
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IFE13_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x840, /* SPECIFIC_IFE13_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x848, /* SPECIFIC_IFE13_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x988, /* SPECIFIC_IFE13_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_READ,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc30, /* SPECIFIC_IBL_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc34, /* SPECIFIC_IBL_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xc38, /* SPECIFIC_IBL_RD_URGENCY_LOW */
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* SPECIFIC_IBL_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc40, /* SPECIFIC_IBL_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xc48, /* SPECIFIC_IBL_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xd08, /* SPECIFIC_IBL_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_LRME_WRITE,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* SPECIFIC_IBL_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* SPECIFIC_IBL_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* SPECIFIC_IBL_WR_URGENCY_LOW */
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* SPECIFIC_IBL_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1040, /* SPECIFIC_IBL_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1048, /* SPECIFIC_IBL_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power on default values are taking care of required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1188, /* SPECIFIC_IBL_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* SPECIFIC_JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* SPECIFIC_JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1438, /* SPECIFIC_JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1440, /* SPECIFIC_JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1448, /* SPECIFIC_JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = false,
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam175_cpas101_err_logger_offsets = {
+	.mainctrl     =  0x2708, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x2710, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x2720, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x2724, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x2728, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x272c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x2730, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x2734, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x2738, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x273c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam175_cpas101_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam175_cpas101_camnoc_info = {
+	.specific = &cam_cpas_v175_101_camnoc_specific[0],
+	.specific_size = sizeof(cam_cpas_v175_101_camnoc_specific) /
+		sizeof(cam_cpas_v175_101_camnoc_specific[0]),
+	.irq_sbm = &cam_cpas_v175_101_irq_sbm,
+	.irq_err = &cam_cpas_v175_101_irq_err[0],
+	.irq_err_size = sizeof(cam_cpas_v175_101_irq_err) /
+		sizeof(cam_cpas_v175_101_irq_err[0]),
+	.err_logger = &cam175_cpas101_err_logger_offsets,
+	.errata_wa_list = &cam175_cpas101_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V175_101_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
new file mode 100644
index 0000000..77a5172
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -0,0 +1,511 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CPAS_API_H_
+#define _CAM_CPAS_API_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <media/cam_cpas.h>
+#include "cam_soc_util.h"
+
+#define CAM_HW_IDENTIFIER_LENGTH 128
+
+/* Default AXI Bandwidth vote */
+#define CAM_CPAS_DEFAULT_AXI_BW 1024
+
+/**
+ * enum cam_cpas_reg_base - Enum for register base identifier. These
+ *                          are the identifiers used in the generic register
+ *                          read/write APIs provided by the cpas driver.
+ */
+enum cam_cpas_reg_base {
+	CAM_CPAS_REG_CPASTOP,
+	CAM_CPAS_REG_CAMNOC,
+	CAM_CPAS_REG_CAMSS,
+	CAM_CPAS_REG_MAX
+};
+
+/**
+ * enum cam_cpas_hw_version - Enum for Titan CPAS HW Versions
+ */
+enum cam_cpas_hw_version {
+	CAM_CPAS_TITAN_NONE = 0,
+	CAM_CPAS_TITAN_150_V100 = 0x150100,
+	CAM_CPAS_TITAN_170_V100 = 0x170100,
+	CAM_CPAS_TITAN_170_V110 = 0x170110,
+	CAM_CPAS_TITAN_170_V120 = 0x170120,
+	CAM_CPAS_TITAN_175_V100 = 0x175100,
+	CAM_CPAS_TITAN_175_V101 = 0x175101,
+	CAM_CPAS_TITAN_MAX
+};
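+
+/*
+ * Note: the enum values appear to encode the HW generation and version,
+ * e.g. CAM_CPAS_TITAN_175_V101 = 0x175101 reads as Titan 1.75, version 1.01.
+ */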
+
+/**
+ * enum cam_camnoc_irq_type - Enum for camnoc irq types
+ *
+ * @CAM_CAMNOC_IRQ_SLAVE_ERROR: Each slave port in CAMNOC (3 QSB ports and
+ *                              1 QHB port) has an error logger. The error
+ *                              observed at any slave port is logged into
+ *                              the error logger register and an IRQ is
+ *                              triggered
+ * @CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR  : Triggered if any error is
+ *                                            detected in the IFE0 or IFE2
+ *                                            UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR  : Triggered if any error is
+ *                                            detected in the IFE1 or IFE3
+ *                                            UBWC encoder instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR: Triggered if any error is
+ *                                            detected in the IPE/BPS UBWC
+ *                                            decoder instance
+ * @CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR: Triggered if any error is
+ *                                            detected in the IPE/BPS UBWC
+ *                                            encoder instance
+ * @CAM_CAMNOC_IRQ_AHB_TIMEOUT              : Triggered when the QHS_ICP slave
+ *                                            times out after 4000 AHB cycles
+ */
+enum cam_camnoc_irq_type {
+	CAM_CAMNOC_IRQ_SLAVE_ERROR,
+	CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+	CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+	CAM_CAMNOC_IRQ_AHB_TIMEOUT,
+};
+
+/**
+ * struct cam_camnoc_irq_slave_err_data : Data for Slave error.
+ *
+ * @mainctrl     : Err logger mainctrl info
+ * @errvld       : Err logger errvld info
+ * @errlog0_low  : Err logger errlog0_low info
+ * @errlog0_high : Err logger errlog0_high info
+ * @errlog1_low  : Err logger errlog1_low info
+ * @errlog1_high : Err logger errlog1_high info
+ * @errlog2_low  : Err logger errlog2_low info
+ * @errlog2_high : Err logger errlog2_high info
+ * @errlog3_low  : Err logger errlog3_low info
+ * @errlog3_high : Err logger errlog3_high info
+ *
+ */
+struct cam_camnoc_irq_slave_err_data {
+	union {
+		struct {
+			uint32_t stall_en : 1; /* bit 0 */
+			uint32_t fault_en : 1; /* bit 1 */
+			uint32_t rsv      : 30; /* bits 2 - 31 */
+		};
+		uint32_t value;
+	} mainctrl;
+	union {
+		struct {
+			uint32_t err_vld : 1; /* bit 0 */
+			uint32_t rsv     : 31; /* bits 1-31 */
+		};
+		uint32_t value;
+	} errvld;
+	union {
+		struct {
+			uint32_t loginfo_vld : 1; /* bit 0 */
+			uint32_t word_error  : 1; /* bit 1 */
+			uint32_t non_secure  : 1; /* bit 2 */
+			uint32_t device      : 1; /* bit 3 */
+			uint32_t opc         : 3; /* bits 4 - 6 */
+			uint32_t rsv0        : 1; /* bit 7 */
+			uint32_t err_code    : 3; /* bits 8 - 10 */
+			uint32_t sizef       : 3; /* bits 11 - 13 */
+			uint32_t rsv1        : 2; /* bits 14 - 15 */
+			uint32_t addr_space  : 6; /* bits 16 - 21 */
+			uint32_t rsv2        : 10; /* bits 22 - 31 */
+		};
+		uint32_t value;
+	}  errlog0_low;
+	union {
+		struct {
+			uint32_t len1 : 10; /* bits 0 - 9 */
+			uint32_t rsv  : 22; /* bits 10 - 31 */
+		};
+		uint32_t value;
+	} errlog0_high;
+	union {
+		struct {
+			uint32_t path : 16; /* bits 0 - 15 */
+			uint32_t rsv  : 16; /* bits 16 - 31 */
+		};
+		uint32_t value;
+	} errlog1_low;
+	union {
+		struct {
+			uint32_t extid : 18; /* bits 0 - 17 */
+			uint32_t rsv   : 14; /* bits 18 - 31 */
+		};
+		uint32_t value;
+	} errlog1_high;
+	union {
+		struct {
+			uint32_t errlog2_lsb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog2_low;
+	union {
+		struct {
+			uint32_t errlog2_msb : 16; /* bits 0 - 15 */
+			uint32_t rsv         : 16; /* bits 16 - 31 */
+		};
+		uint32_t value;
+	} errlog2_high;
+	union {
+		struct {
+			uint32_t errlog3_lsb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog3_low;
+	union {
+		struct {
+			uint32_t errlog3_msb : 32; /* bits 0 - 31 */
+		};
+		uint32_t value;
+	} errlog3_high;
+};
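+
+/*
+ * The unions above let a raw 32-bit register value be decoded field by
+ * field. A minimal decode sketch for ERRLOG0_LOW; "reg_value" stands in
+ * for the raw register read and is illustrative only:
+ *
+ *	struct cam_camnoc_irq_slave_err_data err;
+ *
+ *	err.errlog0_low.value = reg_value;
+ *	if (err.errlog0_low.loginfo_vld)
+ *		pr_err("opc=%u err_code=%u addr_space=%u\n",
+ *			err.errlog0_low.opc, err.errlog0_low.err_code,
+ *			err.errlog0_low.addr_space);
+ */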
+
+/**
+ * struct cam_camnoc_irq_ubwc_enc_data : Data for UBWC Encode error.
+ *
+ * @encerr_status : Encode error status
+ *
+ */
+struct cam_camnoc_irq_ubwc_enc_data {
+	union {
+		struct {
+			uint32_t encerrstatus : 3; /* bits 0 - 2 */
+			uint32_t rsv          : 29; /* bits 3 - 31 */
+		};
+		uint32_t value;
+	} encerr_status;
+};
+
+/**
+ * struct cam_camnoc_irq_ubwc_dec_data : Data for UBWC Decode error.
+ *
+ * @decerr_status : Decoder error status
+ * @thr_err       : Set to 1 if at least one of the bflc_len fields in the
+ *                  bit stream exceeds its threshold value. This error is
+ *                  possible only for RGBA1010102, TP10, and RGB565 formats
+ * @fcl_err       : Set to 1 if a fast clear is used with a legal non-RGB
+ *                  format
+ * @len_md_err    : Set to 1 if the calculated burst length does not match
+ *                  the burst length specified by the metadata value
+ * @format_err    : Set to 1 on an illegal format:
+ *                  1. bad format: 2, 3, 6
+ *                  2. for 32B MAL, metadata = 6
+ *                  3. for 32B MAL RGB565, metadata != 0, 1, 7
+ *                  4. for 64B MAL RGB565, metadata[3:1] == 1, 2
+ *
+ */
+struct cam_camnoc_irq_ubwc_dec_data {
+	union {
+		struct {
+			uint32_t thr_err    : 1; /* bit 0 */
+			uint32_t fcl_err    : 1; /* bit 1 */
+			uint32_t len_md_err : 1; /* bit 2 */
+			uint32_t format_err : 1; /* bit 3 */
+			uint32_t rsv        : 28; /* bits 4 - 31 */
+		};
+		uint32_t value;
+	} decerr_status;
+};
+
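+/**
+ * struct cam_camnoc_irq_ahb_timeout_data : Data for AHB timeout error.
+ *
+ * @data : AHB timeout data
+ *
+ */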
+struct cam_camnoc_irq_ahb_timeout_data {
+	uint32_t data;
+};
+
+/**
+ * struct cam_cpas_irq_data : CAMNOC IRQ data
+ *
+ * @irq_type  : To identify the type of IRQ
+ * @u         : Union of irq err data information
+ * @slave_err : Data for Slave error.
+ *              Valid if type is CAM_CAMNOC_IRQ_SLAVE_ERROR
+ * @enc_err   : Data for UBWC Encode error.
+ *              Valid if type is one of the following:
+ *              CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR
+ *              CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR
+ * @dec_err   : Data for UBWC Decode error.
+ *              Valid if type is CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR
+ * @ahb_err   : Data for AHB timeout error.
+ *              Valid if type is CAM_CAMNOC_IRQ_AHB_TIMEOUT
+ *
+ */
+struct cam_cpas_irq_data {
+	enum cam_camnoc_irq_type irq_type;
+	union {
+		struct cam_camnoc_irq_slave_err_data   slave_err;
+		struct cam_camnoc_irq_ubwc_enc_data    enc_err;
+		struct cam_camnoc_irq_ubwc_dec_data    dec_err;
+		struct cam_camnoc_irq_ahb_timeout_data ahb_err;
+	} u;
+};
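+
+/*
+ * A client callback would typically switch on @irq_type and read only the
+ * matching union member. A minimal sketch (function and message text are
+ * hypothetical; the return semantics are assumed, not defined here):
+ *
+ *	static bool demo_cpas_cb(uint32_t client_handle, void *userdata,
+ *		struct cam_cpas_irq_data *irq_data)
+ *	{
+ *		switch (irq_data->irq_type) {
+ *		case CAM_CAMNOC_IRQ_SLAVE_ERROR:
+ *			pr_err("slave err, errlog0_low=0x%x\n",
+ *				irq_data->u.slave_err.errlog0_low.value);
+ *			break;
+ *		case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+ *			pr_err("decerr status=0x%x\n",
+ *				irq_data->u.dec_err.decerr_status.value);
+ *			break;
+ *		default:
+ *			break;
+ *		}
+ *		return true;
+ *	}
+ */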
+
+/**
+ * struct cam_cpas_register_params : Register params for cpas client
+ *
+ * @identifier        : Input identifier string which is the device label
+ *                      from dt like vfe, ife, jpeg etc
+ * @cell_index        : Input integer identifier pointing to the cell index
+ *                      from dt of the device. This can be used to form a
+ *                      unique string with @identifier like vfe0, ife1,
+ *                      jpeg0, etc
+ * @dev               : device handle
+ * @userdata          : Input private data which will be passed back as
+ *                      an argument in the callback.
+ * @cam_cpas_client_cb : Input callback pointer for triggering the
+ *                      callbacks from CPAS driver.
+ *                      @client_handle : CPAS client handle
+ *                      @userdata    : User data given at the time of register
+ *                      @irq_data    : IRQ data from CAMNOC for this event
+ * @client_handle      : Output unique handle generated for this register
+ *
+ */
+struct cam_cpas_register_params {
+	char            identifier[CAM_HW_IDENTIFIER_LENGTH];
+	uint32_t        cell_index;
+	struct device  *dev;
+	void           *userdata;
+	bool          (*cam_cpas_client_cb)(
+			uint32_t                  client_handle,
+			void                     *userdata,
+			struct cam_cpas_irq_data *irq_data);
+	uint32_t        client_handle;
+};
+
+/**
+ * enum cam_vote_type - Enum for voting type
+ *
+ * @CAM_VOTE_ABSOLUTE : Absolute vote
+ * @CAM_VOTE_DYNAMIC  : Dynamic vote
+ */
+enum cam_vote_type {
+	CAM_VOTE_ABSOLUTE,
+	CAM_VOTE_DYNAMIC,
+};
+
+/**
+ * struct cam_ahb_vote : AHB vote
+ *
+ * @type  : AHB voting type.
+ *          CAM_VOTE_ABSOLUTE : vote based on the 'level' value set below
+ *          CAM_VOTE_DYNAMIC  : vote calculated dynamically from the 'freq'
+ *                              value set below
+ * @level : AHB vote level
+ * @freq  : AHB vote dynamic frequency
+ *
+ */
+struct cam_ahb_vote {
+	enum cam_vote_type   type;
+	union {
+		enum cam_vote_level  level;
+		unsigned long        freq;
+	} vote;
+};
+
+/**
+ * struct cam_axi_vote : AXI vote
+ *
+ * @uncompressed_bw : Bus bandwidth required in Bytes for uncompressed data.
+ *                    This is the required bandwidth for uncompressed
+ *                    data traffic between the hw core and camnoc.
+ * @compressed_bw   : Bus bandwidth required in Bytes for compressed data.
+ *                    This is the required bandwidth for compressed
+ *                    data traffic between camnoc and mmnoc.
+ *
+ * If one of the above is not applicable to a hw client, it has to
+ * fill the same value in both fields.
+ *
+ */
+struct cam_axi_vote {
+	uint64_t   uncompressed_bw;
+	uint64_t   compressed_bw;
+};
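+
+/*
+ * A minimal vote setup sketch (illustrative only; CAM_SVS_VOTE is assumed
+ * to be one of the enum cam_vote_level values, and the bandwidth numbers
+ * are made up):
+ *
+ *	struct cam_ahb_vote ahb_vote = {
+ *		.type = CAM_VOTE_ABSOLUTE,
+ *		.vote.level = CAM_SVS_VOTE,
+ *	};
+ *
+ *	struct cam_axi_vote axi_vote = {
+ *		.uncompressed_bw = 640000000,
+ *		.compressed_bw = 640000000,
+ *	};
+ */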
+
+/**
+ * cam_cpas_register_client()
+ *
+ * @brief: API to register cpas client
+ *
+ * @register_params: Input params to register as a client to CPAS
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_register_client(
+	struct cam_cpas_register_params *register_params);
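+
+/*
+ * A minimal registration sketch (illustrative only; the identifier,
+ * callback and private data below are made-up values, not mandated by
+ * this API):
+ *
+ *	static bool my_cpas_cb(uint32_t client_handle, void *userdata,
+ *		struct cam_cpas_irq_data *irq_data)
+ *	{
+ *		... inspect irq_data->irq_type and the matching member ...
+ *		return true;
+ *	}
+ *
+ *	struct cam_cpas_register_params params = {
+ *		.identifier = "vfe",
+ *		.cell_index = 0,
+ *		.userdata = my_priv,
+ *		.cam_cpas_client_cb = my_cpas_cb,
+ *	};
+ *
+ *	rc = cam_cpas_register_client(&params);
+ *
+ * On success, params.client_handle holds the handle for later calls.
+ */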
+
+/**
+ * cam_cpas_unregister_client()
+ *
+ * @brief: API to unregister cpas client
+ *
+ * @client_handle: Client handle to be unregistered
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_unregister_client(uint32_t client_handle);
+
+/**
+ * cam_cpas_start()
+ *
+ * @brief: API to start cpas client hw. Clients have to vote their minimal
+ *     AHB and AXI bandwidth requirements here. Use cam_cpas_update_ahb_vote
+ *     to scale bandwidth after start.
+ *
+ * @client_handle: client cpas handle
+ * @ahb_vote     : Pointer to ahb vote info
+ * @axi_vote     : Pointer to axi bandwidth vote info
+ *
+ * If AXI vote is not applicable to a particular client, use the value exposed
+ * by CAM_CPAS_DEFAULT_AXI_BW as the default vote request.
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_start(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote,
+	struct cam_axi_vote   *axi_vote);
+
+/**
+ * cam_cpas_stop()
+ *
+ * @brief: API to stop cpas client hw. Bandwidth for AHB, AXI votes
+ *     would be removed for this client on this call. Clients should not
+ *     use cam_cpas_update_ahb_vote or cam_cpas_update_axi_vote
+ *     to remove their bandwidth vote.
+ *
+ * @client_handle: client cpas handle
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_stop(uint32_t client_handle);
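+
+/*
+ * A minimal start/stop flow sketch, assuming client_handle, ahb_vote and
+ * axi_vote were set up as in the examples above:
+ *
+ *	rc = cam_cpas_start(client_handle, &ahb_vote, &axi_vote);
+ *	if (rc)
+ *		return rc;
+ *
+ *	... program and run the client hw ...
+ *
+ *	rc = cam_cpas_stop(client_handle);
+ */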
+
+/**
+ * cam_cpas_update_ahb_vote()
+ *
+ * @brief: API to update AHB vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop, in case a client wants
+ *     to scale to a different vote level. Do not use this function to
+ *     de-vote; removing a client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @ahb_vote      : Pointer to ahb vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_ahb_vote(
+	uint32_t               client_handle,
+	struct cam_ahb_vote   *ahb_vote);
+
+/**
+ * cam_cpas_update_axi_vote()
+ *
+ * @brief: API to update AXI vote requirement. Use this function only
+ *     between cam_cpas_start and cam_cpas_stop, in case a client wants
+ *     to scale to a different vote level. Do not use this function to
+ *     de-vote; removing a client's vote is implicit on cam_cpas_stop
+ *
+ * @client_handle : Client cpas handle
+ * @axi_vote      : Pointer to axi bandwidth vote info
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_update_axi_vote(
+	uint32_t             client_handle,
+	struct cam_axi_vote *axi_vote);
+
+/**
+ * cam_cpas_reg_write()
+ *
+ * @brief: API to write a register value in CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg write with memory barrier
+ * @value         : Value to be written in register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_write(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                  value);
+
+/**
+ * cam_cpas_reg_read()
+ *
+ * @brief: API to read a register value from CPAS register space
+ *
+ * @client_handle : Client cpas handle
+ * @reg_base      : Register base identifier
+ * @offset        : Offset from the register base address
+ * @mb            : Whether to do reg read with memory barrier
+ * @value         : Value read back from the register
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_reg_read(
+	uint32_t                  client_handle,
+	enum cam_cpas_reg_base    reg_base,
+	uint32_t                  offset,
+	bool                      mb,
+	uint32_t                 *value);
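+
+/*
+ * A read-modify-write sketch (illustrative only; CAM_CPAS_REG_CPASTOP is
+ * assumed to be one of the enum cam_cpas_reg_base identifiers, and the
+ * 0x4 offset and 0x1 mask are made-up values):
+ *
+ *	uint32_t val;
+ *
+ *	rc = cam_cpas_reg_read(client_handle, CAM_CPAS_REG_CPASTOP,
+ *		0x4, true, &val);
+ *	if (!rc)
+ *		rc = cam_cpas_reg_write(client_handle, CAM_CPAS_REG_CPASTOP,
+ *			0x4, true, val | 0x1);
+ */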
+
+/**
+ * cam_cpas_get_hw_info()
+ *
+ * @brief: API to get camera hw information
+ *
+ * @camera_family  : Camera family type. One of
+ *                   CAM_FAMILY_CAMERA_SS
+ *                   CAM_FAMILY_CPAS_SS
+ * @camera_version : Camera platform version
+ * @cpas_version   : Camera cpas version
+ * @cam_caps       : Camera capability
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_hw_info(
+	uint32_t                 *camera_family,
+	struct cam_hw_version    *camera_version,
+	struct cam_hw_version    *cpas_version,
+	uint32_t                 *cam_caps);
+
+/**
+ * cam_cpas_get_cpas_hw_version()
+ *
+ * @brief: API to get camera cpas hw version
+ *
+ * @hw_version  : Camera cpas hw version
+ *
+ * @return 0 on success.
+ *
+ */
+int cam_cpas_get_cpas_hw_version(uint32_t *hw_version);
+
+#endif /* _CAM_CPAS_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/Makefile b/drivers/media/platform/msm/camera/cam_fd/Makefile
new file mode 100644
index 0000000..b5a15e8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_dev.o cam_fd_context.o
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
new file mode 100644
index 0000000..713ee05
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_fd_context.h"
+#include "cam_trace.h"
+
+static const char fd_dev_name[] = "fd";
+
+/* Functions in Available state */
+static int __cam_fd_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Acquire dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/* Functions in Acquired state */
+static int __cam_fd_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+static int __cam_fd_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Start dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACTIVATED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+/* Functions in Activated state */
+static int __cam_fd_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("FD", ctx);
+
+	return rc;
+}
+
+static int __cam_fd_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_fd_ctx_stop_dev_in_activated(ctx, NULL);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Stop dev, rc=%d", rc);
+		return rc;
+	}
+
+	rc = __cam_fd_ctx_release_dev_in_acquired(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Release dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_flush_dev_in_activated(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed to flush device, rc=%d", rc);
+
+	return rc;
+}
+
+static int __cam_fd_ctx_config_dev_in_activated(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in Prepare dev, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_fd_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc;
+
+	rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in buf done, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_fd_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_fd_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_fd_ctx_release_dev_in_acquired,
+			.config_dev = __cam_fd_ctx_config_dev_in_acquired,
+			.start_dev = __cam_fd_ctx_start_dev_in_acquired,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = { },
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_fd_ctx_stop_dev_in_activated,
+			.release_dev = __cam_fd_ctx_release_dev_in_activated,
+			.config_dev = __cam_fd_ctx_config_dev_in_activated,
+			.flush_dev = __cam_fd_ctx_flush_dev_in_activated,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_fd_ctx_handle_irq_in_activated,
+	},
+};
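+
+/*
+ * Valid state transitions driven by the ioctl ops above:
+ * Available -> Acquired on acquire_dev, Acquired -> Activated on
+ * start_dev, Activated -> Acquired on stop_dev, and back to Available on
+ * release_dev.
+ */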
+
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+{
+	int rc;
+
+	if (!base_ctx || !fd_ctx) {
+		CAM_ERR(CAM_FD, "Invalid Context %pK %pK", base_ctx, fd_ctx);
+		return -EINVAL;
+	}
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	rc = cam_context_init(base_ctx, fd_dev_name, CAM_FD, ctx_id,
+		NULL, hw_intf, fd_ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Camera Context Base init failed, rc=%d", rc);
+		return rc;
+	}
+
+	fd_ctx->base = base_ctx;
+	base_ctx->ctx_priv = fd_ctx;
+	base_ctx->state_machine = cam_fd_ctx_state_machine;
+
+	return rc;
+}
+
+int cam_fd_context_deinit(struct cam_fd_context *fd_ctx)
+{
+	int rc = 0;
+
+	if (!fd_ctx || !fd_ctx->base) {
+		CAM_ERR(CAM_FD, "Invalid inputs %pK", fd_ctx);
+		return -EINVAL;
+	}
+
+	rc = cam_context_deinit(fd_ctx->base);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in base deinit, rc=%d", rc);
+
+	memset(fd_ctx, 0, sizeof(*fd_ctx));
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.h b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.h
new file mode 100644
index 0000000..26297ed
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_context.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_CONTEXT_H_
+#define _CAM_FD_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+
+/**
+ * struct cam_fd_context - Face Detection context information
+ *
+ * @base     : Base context pointer for this FD context
+ * @req_base : List of base requests for this FD context
+ */
+struct cam_fd_context {
+	struct cam_context       *base;
+	struct cam_ctx_request    req_base[CAM_CTX_REQ_MAX];
+};
+
+int cam_fd_context_init(struct cam_fd_context *fd_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id);
+int cam_fd_context_deinit(struct cam_fd_context *ctx);
+
+#endif /* _CAM_FD_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/cam_fd_dev.c b/drivers/media/platform/msm/camera/cam_fd/cam_fd_dev.c
new file mode 100644
index 0000000..90c47f0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/cam_fd_dev.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_mgr.h"
+#include "cam_fd_hw_mgr_intf.h"
+
+#define CAM_FD_DEV_NAME "cam-fd"
+
+/**
+ * struct cam_fd_dev - FD device information
+ *
+ * @sd:         Subdev information
+ * @base_ctx:   List of base contexts
+ * @fd_ctx:     List of FD contexts
+ * @lock:       Mutex handle
+ * @open_cnt:   FD subdev open count
+ * @probe_done: Whether FD probe is completed
+ */
+struct cam_fd_dev {
+	struct cam_subdev     sd;
+	struct cam_context    base_ctx[CAM_CTX_MAX];
+	struct cam_fd_context fd_ctx[CAM_CTX_MAX];
+	struct mutex          lock;
+	uint32_t              open_cnt;
+	bool                  probe_done;
+};
+
+static struct cam_fd_dev g_fd_dev;
+
+static int cam_fd_dev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt++;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	return 0;
+}
+
+static int cam_fd_dev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_fd_dev *fd_dev = &g_fd_dev;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	if (!fd_dev->probe_done) {
+		CAM_ERR(CAM_FD, "FD Dev not initialized, fd_dev=%pK", fd_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&fd_dev->lock);
+	fd_dev->open_cnt--;
+	CAM_DBG(CAM_FD, "FD Subdev open count %d", fd_dev->open_cnt);
+	mutex_unlock(&fd_dev->lock);
+
+	if (!node) {
+		CAM_ERR(CAM_FD, "Node ptr is NULL");
+		return -EINVAL;
+	}
+
+	cam_node_shutdown(node);
+
+	return 0;
+}
+
+static const struct v4l2_subdev_internal_ops cam_fd_subdev_internal_ops = {
+	.open = cam_fd_dev_open,
+	.close = cam_fd_dev_close,
+};
+
+static int cam_fd_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	g_fd_dev.sd.internal_ops = &cam_fd_subdev_internal_ops;
+
+	/* Initialize the v4l2 subdevice first. (create cam_node) */
+	rc = cam_subdev_probe(&g_fd_dev.sd, pdev, CAM_FD_DEV_NAME,
+		CAM_FD_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD cam_subdev_probe failed, rc=%d", rc);
+		return rc;
+	}
+	node = (struct cam_node *) g_fd_dev.sd.token;
+
+	rc = cam_fd_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in initializing FD HW manager, rc=%d",
+			rc);
+		goto unregister_subdev;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_init(&g_fd_dev.fd_ctx[i],
+			&g_fd_dev.base_ctx[i], &node->hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_FD, "FD context init failed i=%d, rc=%d",
+				i, rc);
+			goto deinit_ctx;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_fd_dev.base_ctx, CAM_CTX_MAX,
+		CAM_FD_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD node init failed, rc=%d", rc);
+		goto deinit_ctx;
+	}
+
+	mutex_init(&g_fd_dev.lock);
+	g_fd_dev.probe_done = true;
+
+	CAM_DBG(CAM_FD, "Camera FD probe complete");
+
+	return 0;
+
+deinit_ctx:
+	for (--i; i >= 0; i--) {
+		if (cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]))
+			CAM_ERR(CAM_FD, "FD context %d deinit failed", i);
+	}
+unregister_subdev:
+	if (cam_subdev_remove(&g_fd_dev.sd))
+		CAM_ERR(CAM_FD, "Failed in subdev remove");
+
+	return rc;
+}
+
+static int cam_fd_dev_remove(struct platform_device *pdev)
+{
+	int i, rc;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_fd_context_deinit(&g_fd_dev.fd_ctx[i]);
+		if (rc)
+			CAM_ERR(CAM_FD, "FD context %d deinit failed, rc=%d",
+				i, rc);
+	}
+
+	rc = cam_fd_hw_mgr_deinit(pdev->dev.of_node);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in hw mgr deinit, rc=%d", rc);
+
+	rc = cam_subdev_remove(&g_fd_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_FD, "Unregister failed, rc=%d", rc);
+
+	mutex_destroy(&g_fd_dev.lock);
+	g_fd_dev.probe_done = false;
+
+	return rc;
+}
+
+static const struct of_device_id cam_fd_dt_match[] = {
+	{
+		.compatible = "qcom,cam-fd"
+	},
+	{}
+};
+
+static struct platform_driver cam_fd_driver = {
+	.probe = cam_fd_dev_probe,
+	.remove = cam_fd_dev_remove,
+	.driver = {
+		.name = "cam_fd",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_fd_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_fd_dev_init_module(void)
+{
+	return platform_driver_register(&cam_fd_driver);
+}
+
+static void __exit cam_fd_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_driver);
+}
+
+module_init(cam_fd_dev_init_module);
+module_exit(cam_fd_dev_exit_module);
+MODULE_DESCRIPTION("MSM FD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/Makefile
new file mode 100644
index 0000000..7782e7f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += fd_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
new file mode 100644
index 0000000..4a2c863
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.c
@@ -0,0 +1,1936 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_fd_context.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_mgr_intf.h"
+#include "cam_fd_hw_mgr.h"
+#include "cam_trace.h"
+
+static struct cam_fd_hw_mgr g_fd_hw_mgr;
+
+static int cam_fd_mgr_util_packet_validate(struct cam_packet *packet)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc;
+
+	if (!packet)
+		return -EINVAL;
+
+	CAM_DBG(CAM_FD, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+		packet->header.request_id, packet->header.op_code,
+		packet->header.size, packet->header.flags);
+	CAM_DBG(CAM_FD,
+		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+		packet->cmd_buf_offset, packet->num_cmd_buf,
+		packet->io_configs_offset, packet->num_io_configs);
+	CAM_DBG(CAM_FD,
+		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+	if (cam_packet_util_validate_packet(packet)) {
+		CAM_ERR(CAM_FD, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	/* All buffers must come through io config, do not support patching */
+	if (packet->num_patches || !packet->num_io_configs) {
+		CAM_ERR(CAM_FD, "wrong number of patch/io config info: %u %u",
+			packet->num_patches, packet->num_io_configs);
+		return -EINVAL;
+	}
+
+	/* KMD Buf index can never be greater than or equal to num cmd bufs */
+	if (packet->kmd_cmd_buf_index >= packet->num_cmd_buf) {
+		CAM_ERR(CAM_FD, "Invalid kmd index %d (%d)",
+			packet->kmd_cmd_buf_index, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	if ((packet->header.op_code & 0xff) !=
+		CAM_PACKET_OPCODES_FD_FRAME_UPDATE) {
+		CAM_ERR(CAM_FD, "Invalid op_code %u",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		/*
+		 * We can allow 0 length cmd buffer. This can happen in case
+		 * umd gives an empty cmd buffer as kmd buffer
+		 */
+		if (!cmd_desc[i].length)
+			continue;
+
+		if ((cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_GENERIC) &&
+			(cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)) {
+			CAM_ERR(CAM_FD, "Invalid meta_data [%d] %u", i,
+				cmd_desc[i].meta_data);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD,
+			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+			i,
+			cmd_desc[i].mem_handle, cmd_desc[i].offset,
+			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+			cmd_desc[i].meta_data);
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc) {
+			CAM_ERR(CAM_FD, "Invalid cmd buffer %d", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_put_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	ctx_ptr = *fd_ctx;
+	if (ctx_ptr)
+		list_add_tail(&ctx_ptr->list, src_list);
+	*fd_ctx = NULL;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_ctx(
+	struct list_head *src_list,
+	struct cam_fd_hw_mgr_ctx **fd_ctx)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr_ctx *ctx_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.ctx_mutex);
+	if (!list_empty(src_list)) {
+		ctx_ptr = list_first_entry(src_list,
+			struct cam_fd_hw_mgr_ctx, list);
+		list_del_init(&ctx_ptr->list);
+	} else {
+		CAM_ERR(CAM_FD, "No more free fd hw mgr ctx");
+		rc = -EBUSY;
+	}
+	*fd_ctx = ctx_ptr;
+	mutex_unlock(&g_fd_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_put_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	req_ptr = *frame_req;
+	if (req_ptr)
+		list_add_tail(&req_ptr->list, src_list);
+	*frame_req = NULL;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_frame_req(
+	struct list_head *src_list,
+	struct cam_fd_mgr_frame_request **frame_req)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *req_ptr = NULL;
+
+	mutex_lock(&g_fd_hw_mgr.frame_req_mutex);
+	if (!list_empty(src_list)) {
+		req_ptr = list_first_entry(src_list,
+			struct cam_fd_mgr_frame_request, list);
+		list_del_init(&req_ptr->list);
+	} else {
+		CAM_DBG(CAM_FD, "Frame req not available");
+		rc = -EPERM;
+	}
+	*frame_req = req_ptr;
+	mutex_unlock(&g_fd_hw_mgr.frame_req_mutex);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_get_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_fd_device **hw_device)
+{
+	if (!hw_mgr || !hw_ctx || !hw_device) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			hw_device);
+		return -EINVAL;
+	}
+
+	if ((hw_ctx->device_index < 0) ||
+		(hw_ctx->device_index >= CAM_FD_HW_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid device index %d",
+			hw_ctx->device_index);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FD, "ctx_index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	*hw_device = &hw_mgr->hw_device[hw_ctx->device_index];
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_release_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx)
+{
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_release_args hw_release_args;
+	int rc;
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	if (hw_device->hw_intf->hw_ops.release) {
+		hw_release_args.hw_ctx = hw_ctx;
+		hw_release_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.release(
+			hw_device->hw_intf->hw_priv, &hw_release_args,
+			sizeof(hw_release_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW release %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid release function");
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_ctxts--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	hw_ctx->device_index = -1;
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_select_device(struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_fd_hw_mgr_ctx *hw_ctx,
+	struct cam_fd_acquire_dev_info *fd_acquire_args)
+{
+	int i, rc;
+	struct cam_fd_hw_reserve_args hw_reserve_args;
+	struct cam_fd_device *hw_device = NULL;
+
+	if (!hw_mgr || !hw_ctx || !fd_acquire_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK %pK %pK", hw_mgr, hw_ctx,
+			fd_acquire_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	/* Check if a device is free which can satisfy the requirements */
+	for (i = 0; i < hw_mgr->num_devices; i++) {
+		hw_device = &hw_mgr->hw_device[i];
+		CAM_DBG(CAM_FD,
+			"[%d] : num_ctxts=%d, modes=%d, raw_results=%d",
+			i, hw_device->num_ctxts,
+			hw_device->hw_caps.supported_modes,
+			hw_device->hw_caps.raw_results_available);
+		if ((hw_device->num_ctxts == 0) &&
+			(fd_acquire_args->mode &
+			hw_device->hw_caps.supported_modes) &&
+			(!fd_acquire_args->get_raw_results ||
+			hw_device->hw_caps.raw_results_available)) {
+			CAM_DBG(CAM_FD, "Found dedicated HW Index=%d", i);
+			hw_device->num_ctxts++;
+			break;
+		}
+	}
+
+	/*
+	 * We couldn't find a free HW which meets requirement, now check if
+	 * there is a HW which meets acquire requirements
+	 */
+	if (i == hw_mgr->num_devices) {
+		for (i = 0; i < hw_mgr->num_devices; i++) {
+			hw_device = &hw_mgr->hw_device[i];
+			if ((fd_acquire_args->mode &
+				hw_device->hw_caps.supported_modes) &&
+				(!fd_acquire_args->get_raw_results ||
+				hw_device->hw_caps.raw_results_available)) {
+				hw_device->num_ctxts++;
+				CAM_DBG(CAM_FD, "Found sharing HW Index=%d", i);
+				break;
+			}
+		}
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	if ((i == hw_mgr->num_devices) || !hw_device) {
+		CAM_ERR(CAM_FD, "Couldn't acquire HW %d %d",
+			fd_acquire_args->mode,
+			fd_acquire_args->get_raw_results);
+		return -EBUSY;
+	}
+
+	CAM_DBG(CAM_FD, "Device index %d selected for this acquire", i);
+
+	/* Check if we can reserve this HW */
+	if (hw_device->hw_intf->hw_ops.reserve) {
+		hw_reserve_args.hw_ctx = hw_ctx;
+		hw_reserve_args.mode = fd_acquire_args->mode;
+		rc = hw_device->hw_intf->hw_ops.reserve(
+			hw_device->hw_intf->hw_priv, &hw_reserve_args,
+			sizeof(hw_reserve_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW reserve %d", rc);
+			return rc;
+		}
+		hw_ctx->ctx_hw_private = hw_reserve_args.ctx_hw_private;
+	} else {
+		CAM_ERR(CAM_FD, "Invalid reserve function");
+		return -EPERM;
+	}
+
+	/* Update required info in hw context */
+	hw_ctx->device_index = i;
+
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_pdev_get_hw_intf(struct device_node *of_node,
+	int i, struct cam_hw_intf **device_hw_intf)
+{
+	struct device_node *device_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *hw_intf = NULL;
+	const char *name = NULL;
+	int rc;
+
+	rc = of_property_read_string_index(of_node, "compat-hw-name", i, &name);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Getting dev object name failed %d %d", i, rc);
+		goto put_node;
+	}
+
+	device_node = of_find_node_by_name(NULL, name);
+	if (!device_node) {
+		CAM_ERR(CAM_FD, "Cannot find node in dtsi %s", name);
+		return -ENODEV;
+	}
+
+	child_pdev = of_find_device_by_node(device_node);
+	if (!child_pdev) {
+		CAM_ERR(CAM_FD, "Failed to find device on bus %s",
+			device_node->name);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	hw_intf = (struct cam_hw_intf *)platform_get_drvdata(child_pdev);
+	if (!hw_intf) {
+		CAM_ERR(CAM_FD, "No driver data for child device");
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	CAM_DBG(CAM_FD, "child type %d index %d child_intf %pK",
+		hw_intf->hw_type, hw_intf->hw_idx, hw_intf);
+
+	if (hw_intf->hw_idx >= CAM_FD_HW_MAX) {
+		CAM_ERR(CAM_FD, "hw_idx invalid %d", hw_intf->hw_idx);
+		rc = -ENODEV;
+		goto put_node;
+	}
+
+	rc = 0;
+	*device_hw_intf = hw_intf;
+
+put_node:
+	of_node_put(device_node);
+
+	return rc;
+}
+
+static int cam_fd_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_fd_hw_cmd_prestart_args *prestart_args =
+		(struct cam_fd_hw_cmd_prestart_args *)user_data;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_FD, "Invalid blob info %pK %u", blob_data,
+			blob_size);
+		return -EINVAL;
+	}
+
+	if (!prestart_args) {
+		CAM_ERR(CAM_FD, "Invalid user data");
+		return -EINVAL;
+	}
+
+	switch (blob_type) {
+	case CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED: {
+		uint32_t *get_raw_results = (uint32_t *)blob_data;
+
+		if (sizeof(uint32_t) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
+				sizeof(uint32_t), blob_size);
+			return -EINVAL;
+		}
+
+		prestart_args->get_raw_results = *get_raw_results;
+		break;
+	}
+	case CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST: {
+		struct cam_fd_soc_clock_bw_request *clk_req =
+			(struct cam_fd_soc_clock_bw_request *)blob_data;
+
+		if (sizeof(struct cam_fd_soc_clock_bw_request) != blob_size) {
+			CAM_ERR(CAM_FD, "Invalid blob size %zu %u",
+				sizeof(struct cam_fd_soc_clock_bw_request),
+				blob_size);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_FD, "SOC Clk Request clock=%lld, bw=%lld",
+			clk_req->clock_rate, clk_req->bandwidth);
+
+		break;
+	}
+	default:
+		CAM_WARN(CAM_FD, "Unknown blob type %d", blob_type);
+		break;
+	}
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_parse_generic_cmd_buffer(
+	struct cam_fd_hw_mgr_ctx *hw_ctx, struct cam_packet *packet,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc = 0;
+
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data == CAM_FD_CMD_BUFFER_ID_CDM)
+			continue;
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc)
+			return rc;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_fd_packet_generic_blob_handler, prestart_args);
+		if (rc)
+			CAM_ERR(CAM_FD, "Failed in processing blobs %d", rc);
+
+		break;
+	}
+
+	return rc;
+}
+
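+/*
+ * Decide whether a given io config port needs an IOVA (io) mapping, a
+ * kernel cpu mapping, or both. Output RESULTS and RAW_RESULTS buffers
+ * need a cpu mapping as well, so that the driver can access the results
+ * directly.
+ */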
+static int cam_fd_mgr_util_get_buf_map_requirement(uint32_t direction,
+	uint32_t resource_type, bool *need_io_map, bool *need_cpu_map)
+{
+	if (!need_io_map || !need_cpu_map) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK", need_io_map,
+			need_cpu_map);
+		return -EINVAL;
+	}
+
+	if (direction == CAM_BUF_INPUT) {
+		switch (resource_type) {
+		case CAM_FD_INPUT_PORT_ID_IMAGE:
+			*need_io_map = true;
+			*need_cpu_map = false;
+			break;
+		default:
+			CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+				direction, resource_type);
+			return -EINVAL;
+		}
+	} else if (direction == CAM_BUF_OUTPUT) {
+		switch (resource_type) {
+		case CAM_FD_OUTPUT_PORT_ID_RESULTS:
+			*need_io_map = true;
+			*need_cpu_map = true;
+			break;
+		case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS:
+			*need_io_map = true;
+			*need_cpu_map = true;
+			break;
+		case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER:
+			*need_io_map = true;
+			*need_cpu_map = false;
+			break;
+		default:
+			CAM_WARN(CAM_FD, "Invalid port: dir %d, id %d",
+				direction, resource_type);
+			return -EINVAL;
+		}
+	} else {
+		CAM_WARN(CAM_FD, "Invalid direction %d", direction);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_prepare_io_buf_info(int32_t iommu_hdl,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_fd_hw_io_buffer *input_buf,
+	struct cam_fd_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+	int rc = -EINVAL;
+	uint32_t i, j, plane, num_out_buf, num_in_buf;
+	struct cam_buf_io_cfg *io_cfg;
+	dma_addr_t io_addr[CAM_PACKET_MAX_PLANES];
+	uintptr_t cpu_addr[CAM_PACKET_MAX_PLANES];
+	size_t size;
+	bool need_io_map, need_cpu_map;
+
+	/* Get IO Buf information */
+	num_out_buf = 0;
+	num_in_buf  = 0;
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&prepare->packet->payload + prepare->packet->io_configs_offset);
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_FD,
+			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+			io_cfg[i].resource_type,
+			io_cfg[i].fence, io_cfg[i].format);
+
+		if ((num_in_buf >= io_buf_size) ||
+			(num_out_buf >= io_buf_size)) {
+			CAM_ERR(CAM_FD, "Invalid number of buffers %d %d %d",
+				num_in_buf, num_out_buf, io_buf_size);
+			return -EINVAL;
+		}
+
+		rc = cam_fd_mgr_util_get_buf_map_requirement(
+			io_cfg[i].direction, io_cfg[i].resource_type,
+			&need_io_map, &need_cpu_map);
+		if (rc) {
+			CAM_WARN(CAM_FD, "Invalid io buff [%d] : %d %d %d",
+				i, io_cfg[i].direction,
+				io_cfg[i].resource_type, rc);
+			continue;
+		}
+
+		memset(io_addr, 0x0, sizeof(io_addr));
+		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+			if (!io_cfg[i].mem_handle[plane])
+				break;
+
+			io_addr[plane] = 0x0;
+			cpu_addr[plane] = 0x0;
+
+			if (need_io_map) {
+				rc = cam_mem_get_io_buf(
+					io_cfg[i].mem_handle[plane],
+					iommu_hdl, &io_addr[plane], &size);
+				if (rc) {
+					CAM_ERR(CAM_FD,
+						"Invalid io buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return -ENOMEM;
+				}
+
+				io_addr[plane] += io_cfg[i].offsets[plane];
+			}
+
+			if (need_cpu_map) {
+				rc = cam_mem_get_cpu_buf(
+					io_cfg[i].mem_handle[plane],
+					&cpu_addr[plane], &size);
+				if (rc || ((io_addr[plane] & 0xFFFFFFFF)
+					!= io_addr[plane])) {
+					CAM_ERR(CAM_FD,
+						"Invalid cpu buf %d %d %d %d",
+						io_cfg[i].direction,
+						io_cfg[i].resource_type, plane,
+						rc);
+					return rc;
+				}
+
+				cpu_addr[plane] += io_cfg[i].offsets[plane];
+			}
+
+			CAM_DBG(CAM_FD, "IO Address[%d][%d] : %pK, %pK",
+				io_cfg[i].direction, plane, io_addr[plane],
+				cpu_addr[plane]);
+		}
+
+		switch (io_cfg[i].direction) {
+		case CAM_BUF_INPUT: {
+			prepare->in_map_entries[num_in_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->in_map_entries[num_in_buf].sync_id =
+				io_cfg[i].fence;
+
+			input_buf[num_in_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				input_buf[num_in_buf].io_addr[j] = io_addr[j];
+				input_buf[num_in_buf].cpu_addr[j] = cpu_addr[j];
+			}
+			input_buf[num_in_buf].num_buf = plane;
+			input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+			num_in_buf++;
+			break;
+		}
+		case CAM_BUF_OUTPUT: {
+			prepare->out_map_entries[num_out_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->out_map_entries[num_out_buf].sync_id =
+				io_cfg[i].fence;
+
+			output_buf[num_out_buf].valid = true;
+			for (j = 0; j < plane; j++) {
+				output_buf[num_out_buf].io_addr[j] = io_addr[j];
+				output_buf[num_out_buf].cpu_addr[j] =
+					cpu_addr[j];
+			}
+			output_buf[num_out_buf].num_buf = plane;
+			output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+			num_out_buf++;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Unsupported io direction %d",
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+	}
+
+	prepare->num_in_map_entries  = num_in_buf;
+	prepare->num_out_map_entries = num_out_buf;
+
+	return 0;
+}
+
+static int cam_fd_mgr_util_prepare_hw_update_entries(
+	struct cam_fd_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args,
+	struct cam_kmd_buf_info *kmd_buf_info)
+{
+	int i, rc;
+	struct cam_hw_update_entry *hw_entry;
+	uint32_t num_ent;
+	struct cam_fd_hw_mgr_ctx *hw_ctx =
+		(struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
+	struct cam_fd_device *hw_device;
+	uint32_t kmd_buf_max_size, kmd_buf_used_bytes = 0;
+	uint32_t *kmd_buf_addr;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+		kmd_buf_info->used_bytes);
+	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+	prestart_args->cmd_buf_addr = kmd_buf_addr;
+	prestart_args->size  = kmd_buf_max_size;
+	prestart_args->pre_config_buf_size = 0;
+	prestart_args->post_config_buf_size = 0;
+
+	if (hw_device->hw_intf->hw_ops.process_cmd) {
+		rc = hw_device->hw_intf->hw_ops.process_cmd(
+			hw_device->hw_intf->hw_priv, CAM_FD_HW_CMD_PRESTART,
+			prestart_args,
+			sizeof(struct cam_fd_hw_cmd_prestart_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in CMD_PRESTART %d", rc);
+			return rc;
+		}
+	}
+
+	kmd_buf_used_bytes += prestart_args->pre_config_buf_size;
+	kmd_buf_used_bytes += prestart_args->post_config_buf_size;
+
+	/* HW layer is expected to add commands */
+	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+		CAM_ERR(CAM_FD, "Invalid kmd used bytes %d (%d)",
+			kmd_buf_used_bytes, kmd_buf_max_size);
+		return -ENOMEM;
+	}
+
+	hw_entry = prepare->hw_update_entries;
+	num_ent = 0;
+
+	if (prestart_args->pre_config_buf_size) {
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient HW entries: %d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = kmd_buf_info->handle;
+		hw_entry[num_ent].len  = prestart_args->pre_config_buf_size;
+		hw_entry[num_ent].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += prestart_args->pre_config_buf_size;
+		kmd_buf_info->offset += prestart_args->pre_config_buf_size;
+		num_ent++;
+	}
+
+	/*
+	 * set the cmd_desc to point the first command descriptor in the
+	 * packet and update hw entries with CDM command buffers
+	 */
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+		&prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data != CAM_FD_CMD_BUFFER_ID_CDM)
+			continue;
+
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient HW entries: %d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = cmd_desc[i].mem_handle;
+		hw_entry[num_ent].len = cmd_desc[i].length;
+		hw_entry[num_ent].offset = cmd_desc[i].offset;
+		num_ent++;
+	}
+
+	if (prestart_args->post_config_buf_size) {
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_FD, "Insufficient HW entries: %d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_ent].handle = kmd_buf_info->handle;
+		hw_entry[num_ent].len    = prestart_args->post_config_buf_size;
+		hw_entry[num_ent].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += prestart_args->post_config_buf_size;
+		kmd_buf_info->offset     += prestart_args->post_config_buf_size;
+
+		num_ent++;
+	}
+
+	prepare->num_hw_update_entries = num_ent;
+
+	CAM_DBG(CAM_FD, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+		prepare->num_hw_update_entries, prepare->num_in_map_entries,
+		prepare->num_out_map_entries);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_submit_frame(void *priv, void *data)
+{
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_mgr *hw_mgr;
+	struct cam_fd_mgr_frame_request *frame_req;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_hw_cmd_start_args start_args;
+	int rc;
+
+	if (!priv) {
+		CAM_ERR(CAM_FD, "Invalid data");
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_fd_hw_mgr *)priv;
+	mutex_lock(&hw_mgr->frame_req_mutex);
+
+	/* Check if we have any frames pending in high priority list */
+	if (!list_empty(&hw_mgr->frame_pending_list_high)) {
+		CAM_DBG(CAM_FD, "Pending frames in high priority list");
+		frame_req = list_first_entry(&hw_mgr->frame_pending_list_high,
+			struct cam_fd_mgr_frame_request, list);
+	} else if (!list_empty(&hw_mgr->frame_pending_list_normal)) {
+		CAM_DBG(CAM_FD, "Pending frames in normal priority list");
+		frame_req = list_first_entry(&hw_mgr->frame_pending_list_normal,
+			struct cam_fd_mgr_frame_request, list);
+	} else {
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		CAM_DBG(CAM_FD, "No pending frames");
+		return 0;
+	}
+
+	CAM_DBG(CAM_FD, "FrameSubmit : Frame[%lld]", frame_req->request_id);
+	hw_ctx = frame_req->hw_ctx;
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_device->lock);
+	if (hw_device->ready_to_process == false) {
+		mutex_unlock(&hw_device->lock);
+		mutex_unlock(&hw_mgr->frame_req_mutex);
+		return -EBUSY;
+	}
+
+	trace_cam_submit_to_hw("FD", frame_req->request_id);
+
+	list_del_init(&frame_req->list);
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	if (hw_device->hw_intf->hw_ops.start) {
+		start_args.hw_ctx = hw_ctx;
+		start_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		start_args.hw_req_private = &frame_req->hw_req_private;
+		start_args.hw_update_entries = frame_req->hw_update_entries;
+		start_args.num_hw_update_entries =
+			frame_req->num_hw_update_entries;
+
+		rc = hw_device->hw_intf->hw_ops.start(
+			hw_device->hw_intf->hw_priv, &start_args,
+			sizeof(start_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Start %d", rc);
+			mutex_unlock(&hw_device->lock);
+			goto put_req_into_free_list;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid hw_ops.start");
+		mutex_unlock(&hw_device->lock);
+		rc = -EPERM;
+		goto put_req_into_free_list;
+	}
+
+	hw_device->ready_to_process = false;
+	hw_device->cur_hw_ctx = hw_ctx;
+	hw_device->req_id = frame_req->request_id;
+	mutex_unlock(&hw_device->lock);
+
+	rc = cam_fd_mgr_util_put_frame_req(
+		&hw_mgr->frame_processing_list, &frame_req);
+	if (rc) {
+		CAM_ERR(CAM_FD,
+			"Failed in putting frame req in processing list");
+		goto stop_unlock;
+	}
+
+	return rc;
+
+stop_unlock:
+	if (hw_device->hw_intf->hw_ops.stop) {
+		struct cam_fd_hw_stop_args stop_args;
+
+		stop_args.hw_ctx = hw_ctx;
+		stop_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		stop_args.hw_req_private = &frame_req->hw_req_private;
+		if (hw_device->hw_intf->hw_ops.stop(
+			hw_device->hw_intf->hw_priv, &stop_args,
+			sizeof(stop_args)))
+			CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+	}
+put_req_into_free_list:
+	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list, &frame_req);
+
+	return rc;
+}
+
+static int cam_fd_mgr_util_schedule_frame_worker_task(
+	struct cam_fd_hw_mgr *hw_mgr)
+{
+	int32_t rc = 0;
+	struct crm_workq_task *task;
+	struct cam_fd_mgr_work_data *work_data;
+
+	task = cam_req_mgr_workq_get_task(hw_mgr->work);
+	if (!task) {
+		CAM_ERR(CAM_FD, "no empty task available");
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_fd_mgr_work_data *)task->payload;
+	work_data->type = CAM_FD_WORK_FRAME;
+
+	task->process_cb = cam_fd_mgr_util_submit_frame;
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int32_t cam_fd_mgr_workq_irq_cb(void *priv, void *data)
+{
+	struct cam_fd_device *hw_device = NULL;
+	struct cam_fd_hw_mgr *hw_mgr;
+	struct cam_fd_mgr_work_data *work_data;
+	struct cam_fd_mgr_frame_request *frame_req = NULL;
+	enum cam_fd_hw_irq_type irq_type;
+	bool frame_abort = true;
+	int rc;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_FD, "Invalid data %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_fd_hw_mgr *)priv;
+	work_data = (struct cam_fd_mgr_work_data *)data;
+	irq_type = work_data->irq_type;
+
+	CAM_DBG(CAM_FD, "FD IRQ type=%d", irq_type);
+
+	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+		/* HALT would be followed by a RESET, ignore this */
+		CAM_DBG(CAM_FD, "HALT IRQ callback");
+		return 0;
+	}
+
+	/* Get the frame from processing list */
+	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_processing_list,
+		&frame_req);
+	if (rc || !frame_req) {
+		/*
+		 * This can happen if reset is triggered while no frames
+		 * were pending, so not an error, just continue to check if
+		 * there are any pending frames and submit
+		 */
+		CAM_DBG(CAM_FD, "No Frame in processing list, rc=%d", rc);
+		goto submit_next_frame;
+	}
+
+	if (!frame_req->hw_ctx) {
+		CAM_ERR(CAM_FD, "Invalid Frame request %lld",
+			frame_req->request_id);
+		goto put_req_in_free_list;
+	}
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, frame_req->hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		goto put_req_in_free_list;
+	}
+
+	/* Read frame results first */
+	if (irq_type == CAM_FD_IRQ_FRAME_DONE) {
+		struct cam_fd_hw_frame_done_args frame_done_args;
+
+		CAM_DBG(CAM_FD, "FrameDone : Frame[%lld]",
+			frame_req->request_id);
+
+		frame_done_args.hw_ctx = frame_req->hw_ctx;
+		frame_done_args.ctx_hw_private =
+			frame_req->hw_ctx->ctx_hw_private;
+		frame_done_args.request_id = frame_req->request_id;
+		frame_done_args.hw_req_private = &frame_req->hw_req_private;
+
+		if (hw_device->hw_intf->hw_ops.process_cmd) {
+			rc = hw_device->hw_intf->hw_ops.process_cmd(
+				hw_device->hw_intf->hw_priv,
+				CAM_FD_HW_CMD_FRAME_DONE,
+				&frame_done_args, sizeof(frame_done_args));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in CMD_FRAME_DONE %d",
+					rc);
+				frame_abort = true;
+				goto notify_context;
+			}
+		}
+
+		frame_abort = false;
+	}
+
+	trace_cam_irq_handled("FD", irq_type);
+
+notify_context:
+	/* Do a callback to inform frame done or stop done */
+	if (frame_req->hw_ctx->event_cb) {
+		struct cam_hw_done_event_data buf_data;
+
+		CAM_DBG(CAM_FD, "FrameHALT : Frame[%lld]",
+			frame_req->request_id);
+
+		buf_data.num_handles = frame_req->num_hw_update_entries;
+		buf_data.request_id = frame_req->request_id;
+
+		rc = frame_req->hw_ctx->event_cb(frame_req->hw_ctx->cb_priv,
+			frame_abort, &buf_data);
+		if (rc)
+			CAM_ERR(CAM_FD, "Error in event cb handling %d", rc);
+	}
+
+	/*
+	 * Now we can mark the hw device as free to process further frames.
+	 * Note - Do not change the state to IDLE until the frame results are
+	 * read, otherwise another thread may schedule frame processing before
+	 * the current frame's results are read. Also, we need to set the IDLE
+	 * state in case some error happens after getting this irq callback
+	 */
+	mutex_lock(&hw_device->lock);
+	hw_device->ready_to_process = true;
+	hw_device->req_id = -1;
+	hw_device->cur_hw_ctx = NULL;
+	CAM_DBG(CAM_FD, "ready_to_process=%d", hw_device->ready_to_process);
+	mutex_unlock(&hw_device->lock);
+
+put_req_in_free_list:
+	rc = cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in putting frame req in free list");
+		/* continue */
+	}
+
+submit_next_frame:
+	/* Check if there are any frames pending for processing and submit */
+	rc = cam_fd_mgr_util_submit_frame(hw_mgr, NULL);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error while submit frame, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_irq_cb(void *data, enum cam_fd_hw_irq_type irq_type)
+{
+	struct cam_fd_hw_mgr *hw_mgr = &g_fd_hw_mgr;
+	int rc = 0;
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct cam_fd_mgr_work_data *work_data;
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_slock, flags);
+	task = cam_req_mgr_workq_get_task(hw_mgr->work);
+	if (!task) {
+		CAM_ERR(CAM_FD, "no empty task available");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_fd_mgr_work_data *)task->payload;
+	work_data->type = CAM_FD_WORK_IRQ;
+	work_data->irq_type = irq_type;
+
+	task->process_cb = cam_fd_mgr_workq_irq_cb;
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in enqueue work task, rc=%d", rc);
+
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_slock, flags);
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query = hw_get_caps_args;
+	struct cam_fd_query_cap_cmd query_fd;
+	void __user *caps_handle =
+		u64_to_user_ptr(query->caps_handle);
+
+	if (copy_from_user(&query_fd, caps_handle,
+		sizeof(struct cam_fd_query_cap_cmd))) {
+		CAM_ERR(CAM_FD, "Failed in copy from user");
+		return -EFAULT;
+	}
+
+	query_fd = hw_mgr->fd_caps;
+
+	CAM_DBG(CAM_FD,
+		"IOMMU device(%d, %d), CDM(%d, %d), versions %d.%d, %d.%d",
+		query_fd.device_iommu.secure, query_fd.device_iommu.non_secure,
+		query_fd.cdm_iommu.secure, query_fd.cdm_iommu.non_secure,
+		query_fd.hw_caps.core_version.major,
+		query_fd.hw_caps.core_version.minor,
+		query_fd.hw_caps.wrapper_version.major,
+		query_fd.hw_caps.wrapper_version.minor);
+
+	if (copy_to_user(caps_handle, &query_fd,
+		sizeof(struct cam_fd_query_cap_cmd)))
+		rc = -EFAULT;
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args =
+		(struct cam_hw_acquire_args *)hw_acquire_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_acquire_dev_info fd_acquire_args;
+	int rc;
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_FD, "Invalid acquire args %pK", acquire_args);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&fd_acquire_args,
+		(void __user *)acquire_args->acquire_info,
+		sizeof(struct cam_fd_acquire_dev_info))) {
+		CAM_ERR(CAM_FD, "Copy from user failed");
+		return -EFAULT;
+	}
+
+	CAM_DBG(CAM_FD, "Acquire : mode=%d, get_raw_results=%d, priority=%d",
+		fd_acquire_args.mode, fd_acquire_args.get_raw_results,
+		fd_acquire_args.priority);
+
+	/* get a free fd hw mgr ctx */
+	rc = cam_fd_mgr_util_get_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+	if (rc || !hw_ctx) {
+		CAM_ERR(CAM_FD, "Get hw context failed, rc=%d, hw_ctx=%pK",
+			rc, hw_ctx);
+		return -EINVAL;
+	}
+
+	if (fd_acquire_args.get_raw_results && !hw_mgr->raw_results_available) {
+		CAM_ERR(CAM_FD, "HW cannot support raw results %d (%d)",
+			fd_acquire_args.get_raw_results,
+			hw_mgr->raw_results_available);
+		rc = -EPERM;
+		goto put_ctx;
+	}
+
+	if (!(fd_acquire_args.mode & hw_mgr->supported_modes)) {
+		CAM_ERR(CAM_FD, "HW cannot support requested mode 0x%x (0x%x)",
+			fd_acquire_args.mode, hw_mgr->supported_modes);
+		rc = -EPERM;
+		goto put_ctx;
+	}
+
+	rc = cam_fd_mgr_util_select_device(hw_mgr, hw_ctx, &fd_acquire_args);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in selecting device, rc=%d", rc);
+		goto put_ctx;
+	}
+
+	hw_ctx->ctx_in_use = true;
+	hw_ctx->hw_mgr = hw_mgr;
+	hw_ctx->get_raw_results = fd_acquire_args.get_raw_results;
+	hw_ctx->mode = fd_acquire_args.mode;
+
+	/* Save incoming cam core info into hw ctx*/
+	hw_ctx->cb_priv = acquire_args->context_data;
+	hw_ctx->event_cb = acquire_args->event_cb;
+
+	/* Update out args */
+	acquire_args->ctxt_to_hw_map = hw_ctx;
+
+	cam_fd_mgr_util_put_ctx(&hw_mgr->used_ctx_list, &hw_ctx);
+
+	return 0;
+put_ctx:
+	list_del_init(&hw_ctx->list);
+	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+	return rc;
+}
+
+static int cam_fd_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_release_args *release_args = hw_release_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	int rc;
+
+	if (!hw_mgr_priv || !hw_release_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK, %pK",
+			hw_mgr_priv, hw_release_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context in use, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	rc = cam_fd_mgr_util_release_device(hw_mgr, hw_ctx);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in release device, rc=%d", rc);
+
+	hw_ctx->ctx_in_use = false;
+	list_del_init(&hw_ctx->list);
+	cam_fd_mgr_util_put_ctx(&hw_mgr->free_ctx_list, &hw_ctx);
+
+	return 0;
+}
+
+static int cam_fd_mgr_hw_start(void *hw_mgr_priv, void *mgr_start_args)
+{
+	int rc = 0;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_start_args *hw_mgr_start_args =
+		(struct cam_hw_start_args *)mgr_start_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_init_args hw_init_args;
+
+	if (!hw_mgr_priv || !hw_mgr_start_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_mgr_start_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_start_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context in use, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_FD, "ctx index=%u, device_index=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	if (hw_device->hw_intf->hw_ops.init) {
+		hw_init_args.hw_ctx = hw_ctx;
+		hw_init_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.init(
+			hw_device->hw_intf->hw_priv, &hw_init_args,
+			sizeof(hw_init_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW Init %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid init function");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush_req(void *hw_mgr_priv,
+	struct cam_hw_flush_args *flush_args)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	uint32_t i = 0;
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
+
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->frame_req_mutex);
+	for (i = 0; i < flush_args->num_req_active; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_active[i];
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_pending_list_high, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+			break;
+		}
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_pending_list_normal, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+			break;
+		}
+
+		list_for_each_entry_safe(frame_req, req_temp,
+			&hw_mgr->frame_processing_list, list) {
+			if (frame_req->hw_ctx != hw_ctx)
+				continue;
+
+			if (frame_req->request_id != flush_req->request_id)
+				continue;
+
+			list_del_init(&frame_req->list);
+
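+			/*
+			 * Stop the HW only if this context owns the device
+			 * and a frame is in flight; if the device is already
+			 * idle or running another context, skip the stop.
+			 */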
+			mutex_lock(&hw_device->lock);
+			if ((hw_device->ready_to_process == true) ||
+				(hw_device->cur_hw_ctx != hw_ctx))
+				goto unlock_dev_flush_req;
+
+			if (hw_device->hw_intf->hw_ops.stop) {
+				hw_stop_args.hw_ctx = hw_ctx;
+				rc = hw_device->hw_intf->hw_ops.stop(
+					hw_device->hw_intf->hw_priv,
+					&hw_stop_args,
+					sizeof(hw_stop_args));
+				if (rc) {
+					CAM_ERR(CAM_FD,
+						"Failed in HW Stop %d", rc);
+					goto unlock_dev_flush_req;
+				}
+				hw_device->ready_to_process = true;
+			}
+
+unlock_dev_flush_req:
+			mutex_unlock(&hw_device->lock);
+			break;
+		}
+	}
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	for (i = 0; i < flush_args->num_req_pending; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_pending[i];
+		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&flush_req);
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush_ctx(void *hw_mgr_priv,
+	struct cam_hw_flush_args *flush_args)
+{
+	int rc = 0;
+	struct cam_fd_mgr_frame_request *frame_req, *req_temp, *flush_req;
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_stop_args hw_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	uint32_t i = 0;
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)flush_args->ctxt_to_hw_map;
+
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->frame_req_mutex);
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_pending_list_high, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+	}
+
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_pending_list_normal, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
+	}
+
+	list_for_each_entry_safe(frame_req, req_temp,
+		&hw_mgr->frame_processing_list, list) {
+		if (frame_req->hw_ctx != hw_ctx)
+			continue;
+
+		list_del_init(&frame_req->list);
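+		/*
+		 * As in the per-request flush: stop the HW only if this
+		 * context owns the device and a frame is in flight.
+		 */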
+		mutex_lock(&hw_device->lock);
+		if ((hw_device->ready_to_process == true) ||
+			(hw_device->cur_hw_ctx != hw_ctx))
+			goto unlock_dev_flush_ctx;
+
+		if (hw_device->hw_intf->hw_ops.stop) {
+			hw_stop_args.hw_ctx = hw_ctx;
+			rc = hw_device->hw_intf->hw_ops.stop(
+				hw_device->hw_intf->hw_priv, &hw_stop_args,
+				sizeof(hw_stop_args));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in HW Stop %d", rc);
+				goto unlock_dev_flush_ctx;
+			}
+			hw_device->ready_to_process = true;
+		}
+
+unlock_dev_flush_ctx:
+		mutex_unlock(&hw_device->lock);
+	}
+	mutex_unlock(&hw_mgr->frame_req_mutex);
+
+	for (i = 0; i < flush_args->num_req_pending; i++) {
+		flush_req = (struct cam_fd_mgr_frame_request *)
+			flush_args->flush_req_pending[i];
+		cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&flush_req);
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_flush(void *hw_mgr_priv,
+	void *hw_flush_args)
+{
+	int rc = 0;
+	struct cam_hw_flush_args *flush_args =
+		(struct cam_hw_flush_args *)hw_flush_args;
+
+	if (!hw_mgr_priv || !hw_flush_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_flush_args);
+		return -EINVAL;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_REQ:
+		rc = cam_fd_mgr_hw_flush_req(hw_mgr_priv, flush_args);
+		break;
+	case CAM_FLUSH_TYPE_ALL:
+		rc = cam_fd_mgr_hw_flush_ctx(hw_mgr_priv, flush_args);
+		break;
+	default:
+		rc = -EINVAL;
+		CAM_ERR(CAM_FD, "Invalid flush type %d",
+			flush_args->flush_type);
+		break;
+	}
+	return rc;
+}
+
+static int cam_fd_mgr_hw_stop(void *hw_mgr_priv, void *mgr_stop_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_stop_args *hw_mgr_stop_args =
+		(struct cam_hw_stop_args *)mgr_stop_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_hw_deinit_args hw_deinit_args;
+	int rc = 0;
+
+	if (!hw_mgr_priv || !hw_mgr_stop_args) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK",
+			hw_mgr_priv, hw_mgr_stop_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)hw_mgr_stop_args->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+	CAM_DBG(CAM_FD, "ctx index=%u, hw_ctx=%d", hw_ctx->ctx_index,
+		hw_ctx->device_index);
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	CAM_DBG(CAM_FD, "FD Device ready_to_process = %d",
+		hw_device->ready_to_process);
+
+	if (hw_device->hw_intf->hw_ops.deinit) {
+		hw_deinit_args.hw_ctx = hw_ctx;
+		hw_deinit_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+		rc = hw_device->hw_intf->hw_ops.deinit(
+			hw_device->hw_intf->hw_priv, &hw_deinit_args,
+			sizeof(hw_deinit_args));
+		if (rc) {
+			CAM_ERR(CAM_FD, "Failed in HW DeInit %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_fd_mgr_hw_prepare_update(void *hw_mgr_priv,
+	void *hw_prepare_update_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) hw_prepare_update_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_kmd_buf_info kmd_buf;
+	int rc;
+	struct cam_fd_hw_cmd_prestart_args prestart_args;
+	struct cam_fd_mgr_frame_request *frame_req;
+
+	if (!hw_mgr_priv || !hw_prepare_update_args) {
+		CAM_ERR(CAM_FD, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_prepare_update_args);
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)prepare->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	rc = cam_fd_mgr_util_get_device(hw_mgr, hw_ctx, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in getting device %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_packet_validate(prepare->packet);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in packet validation %d", rc);
+		goto error;
+	}
+
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in get kmd buf buffer %d", rc);
+		goto error;
+	}
+
+	CAM_DBG(CAM_FD,
+		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+		kmd_buf.size, kmd_buf.used_bytes);
+
+	/* We do not expect any patching, but just do it anyway */
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->device_iommu.non_secure, -1);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Patch FD packet failed, rc=%d", rc);
+		return rc;
+	}
+
+	memset(&prestart_args, 0x0, sizeof(prestart_args));
+	prestart_args.ctx_hw_private = hw_ctx->ctx_hw_private;
+	prestart_args.hw_ctx = hw_ctx;
+	prestart_args.request_id = prepare->packet->header.request_id;
+
+	rc = cam_fd_mgr_util_parse_generic_cmd_buffer(hw_ctx, prepare->packet,
+		&prestart_args);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in parsing gerneric cmd buffer %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_prepare_io_buf_info(
+		hw_mgr->device_iommu.non_secure, prepare,
+		prestart_args.input_buf, prestart_args.output_buf,
+		CAM_FD_MAX_IO_BUFFERS);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in prepare IO Buf %d", rc);
+		goto error;
+	}
+
+	rc = cam_fd_mgr_util_prepare_hw_update_entries(hw_mgr, prepare,
+		&prestart_args, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in hw update entries %d", rc);
+		goto error;
+	}
+
+	/* get a free frame req from free list */
+	rc = cam_fd_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+	if (rc || !frame_req) {
+		CAM_ERR(CAM_FD, "Get frame_req failed, rc=%d, hw_ctx=%pK",
+			rc, hw_ctx);
+		return -ENOMEM;
+	}
+
+	/* Setup frame request info and queue to pending list */
+	frame_req->hw_ctx = hw_ctx;
+	frame_req->request_id = prepare->packet->header.request_id;
+	/* This has to be passed to HW while calling hw_ops->start */
+	frame_req->hw_req_private = prestart_args.hw_req_private;
+
+	/*
+	 * Save the current frame_req into priv,
+	 * this will come as priv while hw_config
+	 */
+	prepare->priv = frame_req;
+
+	CAM_DBG(CAM_FD, "FramePrepare : Frame[%lld]", frame_req->request_id);
+
+	return 0;
+error:
+	return rc;
+}
+
+static int cam_fd_mgr_hw_config(void *hw_mgr_priv, void *hw_config_args)
+{
+	struct cam_fd_hw_mgr *hw_mgr = (struct cam_fd_hw_mgr *)hw_mgr_priv;
+	struct cam_hw_config_args *config =
+		(struct cam_hw_config_args *) hw_config_args;
+	struct cam_fd_hw_mgr_ctx *hw_ctx;
+	struct cam_fd_mgr_frame_request *frame_req;
+	int rc;
+	int i;
+
+	if (!hw_mgr || !config) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK", hw_mgr, config);
+		return -EINVAL;
+	}
+
+	if (!config->num_hw_update_entries) {
+		CAM_ERR(CAM_FD, "No hw update enteries are available");
+		return -EINVAL;
+	}
+
+	hw_ctx = (struct cam_fd_hw_mgr_ctx *)config->ctxt_to_hw_map;
+	if (!hw_ctx || !hw_ctx->ctx_in_use) {
+		CAM_ERR(CAM_FD, "Invalid context is used, hw_ctx=%pK", hw_ctx);
+		return -EPERM;
+	}
+
+	frame_req = config->priv;
+
+	trace_cam_apply_req("FD", frame_req->request_id);
+	CAM_DBG(CAM_FD, "FrameHWConfig : Frame[%lld]", frame_req->request_id);
+
+	frame_req->num_hw_update_entries = config->num_hw_update_entries;
+	for (i = 0; i < config->num_hw_update_entries; i++) {
+		frame_req->hw_update_entries[i] = config->hw_update_entries[i];
+		CAM_DBG(CAM_FD, "PreStart HWEntry[%d] : %d %d %d %d %pK",
+			frame_req->hw_update_entries[i].handle,
+			frame_req->hw_update_entries[i].offset,
+			frame_req->hw_update_entries[i].len,
+			frame_req->hw_update_entries[i].flags,
+			frame_req->hw_update_entries[i].addr);
+	}
+
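+	/*
+	 * Queue on the pending list matching the context priority so the
+	 * frame worker can pick up high priority requests first.
+	 */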
+	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+		CAM_DBG(CAM_FD, "Insert frame into prio0 queue");
+		rc = cam_fd_mgr_util_put_frame_req(
+			&hw_mgr->frame_pending_list_high, &frame_req);
+	} else {
+		CAM_DBG(CAM_FD, "Insert frame into prio1 queue");
+		rc = cam_fd_mgr_util_put_frame_req(
+			&hw_mgr->frame_pending_list_normal, &frame_req);
+	}
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in queuing frame req, rc=%d", rc);
+		goto put_free_list;
+	}
+
+	rc = cam_fd_mgr_util_schedule_frame_worker_task(hw_mgr);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Worker task scheduling failed %d", rc);
+		goto remove_and_put_free_list;
+	}
+
+	return 0;
+
+remove_and_put_free_list:
+
+	if (hw_ctx->priority == CAM_FD_PRIORITY_HIGH) {
+		CAM_DBG(CAM_FD, "Removing frame into prio0 queue");
+		cam_fd_mgr_util_get_frame_req(
+			&hw_mgr->frame_pending_list_high, &frame_req);
+	} else {
+		CAM_DBG(CAM_FD, "Removing frame into prio1 queue");
+		cam_fd_mgr_util_get_frame_req(
+			&hw_mgr->frame_pending_list_normal, &frame_req);
+	}
+put_free_list:
+	cam_fd_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+		&frame_req);
+
+	return rc;
+}
+
+int cam_fd_hw_mgr_deinit(struct device_node *of_node)
+{
+	CAM_DBG(CAM_FD, "HW Mgr Deinit");
+
+	cam_req_mgr_workq_destroy(&g_fd_hw_mgr.work);
+
+	cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+
+	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+	return 0;
+}
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr_intf)
+{
+	int count, i, rc = 0;
+	struct cam_hw_intf *hw_intf = NULL;
+	struct cam_fd_hw_mgr_ctx *hw_mgr_ctx;
+	struct cam_fd_device *hw_device;
+	struct cam_fd_mgr_frame_request *frame_req;
+
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_FD, "Invalid args of_node %pK hw_mgr_intf %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	memset(&g_fd_hw_mgr, 0x0, sizeof(g_fd_hw_mgr));
+	memset(hw_mgr_intf, 0x0, sizeof(*hw_mgr_intf));
+
+	mutex_init(&g_fd_hw_mgr.ctx_mutex);
+	mutex_init(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_init(&g_fd_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_fd_hw_mgr.hw_mgr_slock);
+
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if ((count <= 0) || (count > CAM_FD_HW_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid compat names in dev tree %d", count);
+		return -EINVAL;
+	}
+	g_fd_hw_mgr.num_devices = count;
+
+	g_fd_hw_mgr.raw_results_available = false;
+	g_fd_hw_mgr.supported_modes = 0;
+
+	for (i = 0; i < count; i++) {
+		hw_device = &g_fd_hw_mgr.hw_device[i];
+
+		rc = cam_fd_mgr_util_pdev_get_hw_intf(of_node, i, &hw_intf);
+		if (rc) {
+			CAM_ERR(CAM_FD, "hw intf from pdev failed, rc=%d", rc);
+			return rc;
+		}
+
+		mutex_init(&hw_device->lock);
+
+		hw_device->valid = true;
+		hw_device->hw_intf = hw_intf;
+		hw_device->ready_to_process = true;
+
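+		/* Register the HW Mgr IRQ callback with the HW layer */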
+		if (hw_device->hw_intf->hw_ops.process_cmd) {
+			struct cam_fd_hw_cmd_set_irq_cb irq_cb_args;
+
+			irq_cb_args.cam_fd_hw_mgr_cb = cam_fd_mgr_irq_cb;
+			irq_cb_args.data = hw_device;
+
+			rc = hw_device->hw_intf->hw_ops.process_cmd(
+				hw_device->hw_intf->hw_priv,
+				CAM_FD_HW_CMD_REGISTER_CALLBACK,
+				&irq_cb_args, sizeof(irq_cb_args));
+			if (rc) {
+				CAM_ERR(CAM_FD,
+					"Failed in REGISTER_CALLBACK %d", rc);
+				return rc;
+			}
+		}
+
+		if (hw_device->hw_intf->hw_ops.get_hw_caps) {
+			rc = hw_device->hw_intf->hw_ops.get_hw_caps(
+				hw_intf->hw_priv, &hw_device->hw_caps,
+				sizeof(hw_device->hw_caps));
+			if (rc) {
+				CAM_ERR(CAM_FD, "Failed in get_hw_caps %d", rc);
+				return rc;
+			}
+
+			g_fd_hw_mgr.raw_results_available |=
+				hw_device->hw_caps.raw_results_available;
+			g_fd_hw_mgr.supported_modes |=
+				hw_device->hw_caps.supported_modes;
+
+			CAM_DBG(CAM_FD,
+				"Device[mode=%d, raw=%d], Mgr[mode=%d, raw=%d]",
+				hw_device->hw_caps.supported_modes,
+				hw_device->hw_caps.raw_results_available,
+				g_fd_hw_mgr.supported_modes,
+				g_fd_hw_mgr.raw_results_available);
+		}
+	}
+
+	INIT_LIST_HEAD(&g_fd_hw_mgr.free_ctx_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.used_ctx_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_free_list);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_high);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_pending_list_normal);
+	INIT_LIST_HEAD(&g_fd_hw_mgr.frame_processing_list);
+
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+	g_fd_hw_mgr.device_iommu.secure = -1;
+	g_fd_hw_mgr.cdm_iommu.non_secure = -1;
+	g_fd_hw_mgr.cdm_iommu.secure = -1;
+
+	rc = cam_smmu_get_handle("fd",
+		&g_fd_hw_mgr.device_iommu.non_secure);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Get iommu handle failed, rc=%d", rc);
+		goto destroy_mutex;
+	}
+
+	rc = cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_FD, "FD attach iommu handle failed, rc=%d", rc);
+		goto destroy_smmu;
+	}
+
+	rc = cam_cdm_get_iommu_handle("fd", &g_fd_hw_mgr.cdm_iommu);
+	if (rc)
+		CAM_DBG(CAM_FD, "Failed to acquire the CDM iommu handles");
+
+	CAM_DBG(CAM_FD, "iommu handles : device(%d, %d), cdm(%d, %d)",
+		g_fd_hw_mgr.device_iommu.non_secure,
+		g_fd_hw_mgr.device_iommu.secure,
+		g_fd_hw_mgr.cdm_iommu.non_secure,
+		g_fd_hw_mgr.cdm_iommu.secure);
+
+	/* Init hw mgr contexts and add to free list */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		hw_mgr_ctx = &g_fd_hw_mgr.ctx_pool[i];
+
+		memset(hw_mgr_ctx, 0x0, sizeof(*hw_mgr_ctx));
+		INIT_LIST_HEAD(&hw_mgr_ctx->list);
+
+		hw_mgr_ctx->ctx_index = i;
+		hw_mgr_ctx->device_index = -1;
+		hw_mgr_ctx->hw_mgr = &g_fd_hw_mgr;
+
+		list_add_tail(&hw_mgr_ctx->list, &g_fd_hw_mgr.free_ctx_list);
+	}
+
+	/* Init hw mgr frame requests and add to free list */
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		frame_req = &g_fd_hw_mgr.frame_req[i];
+
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->list);
+
+		list_add_tail(&frame_req->list, &g_fd_hw_mgr.frame_free_list);
+	}
+
+	rc = cam_req_mgr_workq_create("cam_fd_worker", CAM_FD_WORKQ_NUM_TASK,
+		&g_fd_hw_mgr.work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Unable to create a worker, rc=%d", rc);
+		goto detach_smmu;
+	}
+
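+	/* Attach per-task work data so worker callbacks can retrieve it */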
+	for (i = 0; i < CAM_FD_WORKQ_NUM_TASK; i++)
+		g_fd_hw_mgr.work->task.pool[i].payload =
+			&g_fd_hw_mgr.work_data[i];
+
+	/* Setup hw cap so that we can just return the info when requested */
+	memset(&g_fd_hw_mgr.fd_caps, 0, sizeof(g_fd_hw_mgr.fd_caps));
+	g_fd_hw_mgr.fd_caps.device_iommu = g_fd_hw_mgr.device_iommu;
+	g_fd_hw_mgr.fd_caps.cdm_iommu = g_fd_hw_mgr.cdm_iommu;
+	g_fd_hw_mgr.fd_caps.hw_caps = g_fd_hw_mgr.hw_device[0].hw_caps;
+
+	CAM_DBG(CAM_FD,
+		"IOMMU device(%d, %d), CDM(%d, %d) versions core[%d.%d], wrapper[%d.%d]",
+		g_fd_hw_mgr.fd_caps.device_iommu.secure,
+		g_fd_hw_mgr.fd_caps.device_iommu.non_secure,
+		g_fd_hw_mgr.fd_caps.cdm_iommu.secure,
+		g_fd_hw_mgr.fd_caps.cdm_iommu.non_secure,
+		g_fd_hw_mgr.fd_caps.hw_caps.core_version.major,
+		g_fd_hw_mgr.fd_caps.hw_caps.core_version.minor,
+		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.major,
+		g_fd_hw_mgr.fd_caps.hw_caps.wrapper_version.minor);
+
+	hw_mgr_intf->hw_mgr_priv = &g_fd_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_fd_mgr_hw_get_caps;
+	hw_mgr_intf->hw_acquire = cam_fd_mgr_hw_acquire;
+	hw_mgr_intf->hw_release = cam_fd_mgr_hw_release;
+	hw_mgr_intf->hw_start = cam_fd_mgr_hw_start;
+	hw_mgr_intf->hw_stop = cam_fd_mgr_hw_stop;
+	hw_mgr_intf->hw_prepare_update = cam_fd_mgr_hw_prepare_update;
+	hw_mgr_intf->hw_config = cam_fd_mgr_hw_config;
+	hw_mgr_intf->hw_read = NULL;
+	hw_mgr_intf->hw_write = NULL;
+	hw_mgr_intf->hw_close = NULL;
+	hw_mgr_intf->hw_flush = cam_fd_mgr_hw_flush;
+
+	return rc;
+
+detach_smmu:
+	cam_smmu_ops(g_fd_hw_mgr.device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+	cam_smmu_destroy_handle(g_fd_hw_mgr.device_iommu.non_secure);
+	g_fd_hw_mgr.device_iommu.non_secure = -1;
+destroy_mutex:
+	mutex_destroy(&g_fd_hw_mgr.ctx_mutex);
+	mutex_destroy(&g_fd_hw_mgr.frame_req_mutex);
+	mutex_destroy(&g_fd_hw_mgr.hw_mgr_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
new file mode 100644
index 0000000..621043c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_MGR_H_
+#define _CAM_FD_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_fd.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_fd_hw_intf.h"
+
+#define CAM_FD_HW_MAX            1
+#define CAM_FD_WORKQ_NUM_TASK    10
+
+struct cam_fd_hw_mgr;
+
+/**
+ * enum cam_fd_mgr_work_type - Type of worker task
+ *
+ * @CAM_FD_WORK_FRAME : Work type indicating frame task
+ * @CAM_FD_WORK_IRQ   : Work type indicating irq task
+ */
+enum cam_fd_mgr_work_type {
+	CAM_FD_WORK_FRAME,
+	CAM_FD_WORK_IRQ,
+};
+
+/**
+ * struct cam_fd_hw_mgr_ctx : FD HW Mgr context
+ *
+ * @list            : List pointer used to maintain this context
+ *                    in free, used list
+ * @ctx_index       : Index of this context
+ * @ctx_in_use      : Whether this context is in use
+ * @event_cb        : Event callback pointer to notify cam core context
+ * @cb_priv         : Event callback private pointer
+ * @hw_mgr          : Pointer to hw manager
+ * @get_raw_results : Whether this context needs raw results
+ * @mode            : Mode in which this context runs
+ * @device_index    : HW Device used by this context
+ * @ctx_hw_private  : HW layer's private context pointer for this context
+ * @priority        : Priority of this context
+ */
+struct cam_fd_hw_mgr_ctx {
+	struct list_head               list;
+	uint32_t                       ctx_index;
+	bool                           ctx_in_use;
+	cam_hw_event_cb_func           event_cb;
+	void                          *cb_priv;
+	struct cam_fd_hw_mgr          *hw_mgr;
+	bool                           get_raw_results;
+	enum cam_fd_hw_mode            mode;
+	int32_t                        device_index;
+	void                          *ctx_hw_private;
+	uint32_t                       priority;
+};
+
+/**
+ * struct cam_fd_device : FD HW Device
+ *
+ * @hw_caps          : This FD device's capabilities
+ * @hw_intf          : FD device's interface information
+ * @ready_to_process : Whether this device is ready to process next frame
+ * @num_ctxts        : Number of contexts currently running on this device
+ * @valid            : Whether this device is valid
+ * @lock             : Lock protecting this device's state
+ * @cur_hw_ctx       : Current hw context running on the device
+ * @req_id           : Currently processing request id
+ */
+struct cam_fd_device {
+	struct cam_fd_hw_caps     hw_caps;
+	struct cam_hw_intf       *hw_intf;
+	bool                      ready_to_process;
+	uint32_t                  num_ctxts;
+	bool                      valid;
+	struct mutex              lock;
+	struct cam_fd_hw_mgr_ctx *cur_hw_ctx;
+	int64_t                   req_id;
+};
+
+/**
+ * struct cam_fd_mgr_frame_request : Frame request information maintained
+ *                                   in HW Mgr layer
+ *
+ * @list                  : List pointer used to maintain this request in
+ *                          free, pending, processing request lists
+ * @request_id            : Request ID corresponding to this request
+ * @hw_ctx                : HW context from which this request is coming
+ * @hw_req_private        : HW layer's private information specific to
+ *                          this request
+ * @hw_update_entries     : HW update entries corresponding to this request
+ *                          which needs to be submitted to HW through CDM
+ * @num_hw_update_entries : Number of HW update entries
+ */
+struct cam_fd_mgr_frame_request {
+	struct list_head               list;
+	uint64_t                       request_id;
+	struct cam_fd_hw_mgr_ctx      *hw_ctx;
+	struct cam_fd_hw_req_private   hw_req_private;
+	struct cam_hw_update_entry     hw_update_entries[CAM_FD_MAX_HW_ENTRIES];
+	uint32_t                       num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_mgr_work_data : HW Mgr work data information
+ *
+ * @type     : Type of work
+ * @irq_type : IRQ type when this work is queued because of irq callback
+ */
+struct cam_fd_mgr_work_data {
+	enum cam_fd_mgr_work_type      type;
+	enum cam_fd_hw_irq_type        irq_type;
+};
+
+/**
+ * struct cam_fd_hw_mgr : FD HW Mgr information
+ *
+ * @free_ctx_list             : List of free contexts available for acquire
+ * @used_ctx_list             : List of contexts that are acquired
+ * @frame_free_list           : List of free frame requests available
+ * @frame_pending_list_high   : List of high priority frame requests pending
+ *                              for processing
+ * @frame_pending_list_normal : List of normal priority frame requests pending
+ *                              for processing
+ * @frame_processing_list     : List of frame requests currently being
+ *                              processed. Generally a maximum of one
+ *                              request is present in this list
+ * @hw_mgr_mutex              : Mutex to protect hw mgr data when accessed
+ *                              from multiple threads
+ * @hw_mgr_slock              : Spin lock to protect hw mgr data when accessed
+ *                              from multiple threads
+ * @ctx_mutex                 : Mutex to protect context list
+ * @frame_req_mutex           : Mutex to protect frame request list
+ * @device_iommu              : Device IOMMU information
+ * @cdm_iommu                 : CDM IOMMU information
+ * @hw_device                 : Underlying HW device information
+ * @num_devices               : Number of HW devices available
+ * @raw_results_available     : Whether raw results available in this driver
+ * @supported_modes           : Supported modes by this driver
+ * @ctx_pool                  : Pool of contexts
+ * @frame_req                 : Pool of frame requests
+ * @work                      : Worker handle
+ * @work_data                 : Worker data
+ * @fd_caps                   : FD driver capabilities
+ */
+struct cam_fd_hw_mgr {
+	struct list_head                   free_ctx_list;
+	struct list_head                   used_ctx_list;
+	struct list_head                   frame_free_list;
+	struct list_head                   frame_pending_list_high;
+	struct list_head                   frame_pending_list_normal;
+	struct list_head                   frame_processing_list;
+	struct mutex                       hw_mgr_mutex;
+	spinlock_t                         hw_mgr_slock;
+	struct mutex                       ctx_mutex;
+	struct mutex                       frame_req_mutex;
+	struct cam_iommu_handle            device_iommu;
+	struct cam_iommu_handle            cdm_iommu;
+	struct cam_fd_device               hw_device[CAM_FD_HW_MAX];
+	uint32_t                           num_devices;
+	bool                               raw_results_available;
+	uint32_t                           supported_modes;
+	struct cam_fd_hw_mgr_ctx           ctx_pool[CAM_CTX_MAX];
+	struct cam_fd_mgr_frame_request    frame_req[CAM_CTX_REQ_MAX];
+	struct cam_req_mgr_core_workq     *work;
+	struct cam_fd_mgr_work_data        work_data[CAM_FD_WORKQ_NUM_TASK];
+	struct cam_fd_query_cap_cmd        fd_caps;
+};
+
+#endif /* _CAM_FD_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h
new file mode 100644
index 0000000..34007b3e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/cam_fd_hw_mgr_intf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_MGR_INTF_H_
+#define _CAM_FD_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+
+int cam_fd_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr_intf);
+int cam_fd_hw_mgr_deinit(struct device_node *of_node);
+
+#endif /* _CAM_FD_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/Makefile b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/Makefile
new file mode 100644
index 0000000..3992a32
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_fd_hw_dev.o cam_fd_hw_core.o cam_fd_hw_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
new file mode 100644
index 0000000..d55ac96
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.c
@@ -0,0 +1,1161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_trace.h"
+
+#define CAM_FD_REG_VAL_PAIR_SIZE 256
+
+static uint32_t cam_fd_cdm_write_reg_val_pair(uint32_t *buffer,
+	uint32_t index, uint32_t reg_offset, uint32_t reg_value)
+{
+	buffer[index++] = reg_offset;
+	buffer[index++] = reg_value;
+
+	CAM_DBG(CAM_FD, "FD_CDM_CMD: Base[FD_CORE] Offset[0x%8x] Value[0x%8x]",
+		reg_offset, reg_value);
+
+	return index;
+}
+
+static void cam_fd_hw_util_cdm_callback(uint32_t handle, void *userdata,
+	enum cam_cdm_cb_status status, uint64_t cookie)
+{
+	trace_cam_cdm_cb("FD", status);
+	CAM_DBG(CAM_FD, "CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+		handle, userdata, status, cookie);
+}
+
+static void cam_fd_hw_util_enable_power_on_settings(struct cam_hw_info *fd_hw)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+
+	if (hw_static_info->enable_errata_wa.single_irq_only == false) {
+		/* Enable IRQs here */
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			hw_static_info->irq_mask);
+	}
+
+	/* QoS settings */
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.vbif_req_priority,
+		hw_static_info->qos_priority);
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.vbif_priority_level,
+		hw_static_info->qos_priority_level);
+}
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_caps *hw_caps)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+	uint32_t reg_value;
+
+	if (!hw_static_info) {
+		CAM_ERR(CAM_FD, "Invalid hw info data");
+		return -EINVAL;
+	}
+
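+	/*
+	 * Version register layouts as implied by the masks/shifts used below:
+	 * core    - major[11:8],  minor[7:4],   incr[3:0]
+	 * wrapper - major[31:28], minor[27:16], incr[15:0]
+	 */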
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.version);
+	hw_caps->core_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf00, 0x8);
+	hw_caps->core_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0, 0x4);
+	hw_caps->core_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf, 0x0);
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.wrapper_version);
+	hw_caps->wrapper_version.major =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1c);
+	hw_caps->wrapper_version.minor =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->wrapper_version.incr =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	hw_caps->raw_results_available =
+		hw_static_info->results.raw_results_available;
+	hw_caps->supported_modes = hw_static_info->supported_modes;
+
+	CAM_DBG(CAM_FD, "core:%d.%d.%d wrapper:%d.%d.%d intermediate:%d",
+		hw_caps->core_version.major, hw_caps->core_version.minor,
+		hw_caps->core_version.incr, hw_caps->wrapper_version.major,
+		hw_caps->wrapper_version.minor, hw_caps->wrapper_version.incr,
+		hw_caps->raw_results_available);
+
+	return 0;
+}
+
+static int cam_fd_hw_util_fdwrapper_sync_reset(struct cam_hw_info *fd_hw)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	long time_left;
+
+	/* Before triggering reset to HW, clear the reset complete */
+	reinit_completion(&fd_core->reset_complete);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.control, 0x1);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE));
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.sw_reset, 0x1);
+
+	time_left = wait_for_completion_timeout(&fd_core->reset_complete,
+		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+	if (time_left <= 0)
+		CAM_WARN(CAM_FD, "HW reset timeout time_left=%ld", time_left);
+
+	CAM_DBG(CAM_FD, "FD Wrapper SW Sync Reset complete");
+
+	return 0;
+}
+
+
+static int cam_fd_hw_util_fdwrapper_halt(struct cam_hw_info *fd_hw)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	long time_left;
+
+	/* Before triggering halt to HW, clear halt complete */
+	reinit_completion(&fd_core->halt_complete);
+
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE));
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.hw_stop, 0x1);
+
+	time_left = wait_for_completion_timeout(&fd_core->halt_complete,
+		msecs_to_jiffies(CAM_FD_HW_HALT_RESET_TIMEOUT));
+	if (time_left <= 0)
+		CAM_WARN(CAM_FD, "HW halt timeout time_left=%ld", time_left);
+
+	CAM_DBG(CAM_FD, "FD Wrapper Halt complete");
+
+	return 0;
+}
+
+static int cam_fd_hw_util_processcmd_prestart(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_cmd_prestart_args *prestart_args)
+{
+	struct cam_hw_soc_info *soc_info = &fd_hw->soc_info;
+	struct cam_fd_hw_static_info *hw_static_info =
+		((struct cam_fd_core *)fd_hw->core_info)->hw_static_info;
+	struct cam_fd_ctx_hw_private *ctx_hw_private =
+		prestart_args->ctx_hw_private;
+	uint32_t size, size_required = 0;
+	uint32_t mem_base;
+	uint32_t *cmd_buf_addr = prestart_args->cmd_buf_addr;
+	uint32_t reg_val_pair[CAM_FD_REG_VAL_PAIR_SIZE];
+	uint32_t num_cmds = 0;
+	int i;
+	struct cam_fd_hw_io_buffer *io_buf;
+	struct cam_fd_hw_req_private *req_private;
+	uint32_t available_size = prestart_args->size;
+	bool work_buffer_configured = false;
+
+	if (!ctx_hw_private || !cmd_buf_addr) {
+		CAM_ERR(CAM_FD, "Invalid input prestart args %pK %pK",
+			ctx_hw_private, cmd_buf_addr);
+		return -EINVAL;
+	}
+
+	if (prestart_args->get_raw_results &&
+		!hw_static_info->results.raw_results_available) {
+		CAM_ERR(CAM_FD, "Raw results not supported %d %d",
+			prestart_args->get_raw_results,
+			hw_static_info->results.raw_results_available);
+		return -EINVAL;
+	}
+
+	req_private = &prestart_args->hw_req_private;
+	req_private->ctx_hw_private = prestart_args->ctx_hw_private;
+	req_private->request_id = prestart_args->request_id;
+	req_private->get_raw_results = prestart_args->get_raw_results;
+	req_private->fd_results = NULL;
+	req_private->raw_results = NULL;
+
+	/* Start preparing CDM register values that KMD has to insert */
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+		hw_static_info->core_regs.control, 0x1);
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, num_cmds,
+		hw_static_info->core_regs.control, 0x0);
+
+	for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+		io_buf = &prestart_args->input_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+			CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_INPUT);
+			return -EINVAL;
+		}
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_FD_INPUT_PORT_ID_IMAGE: {
+			if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+				CAM_ERR(CAM_FD,
+					"Invalid reg_val pair size %d, %d",
+					num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+				return -EINVAL;
+			}
+
+			num_cmds = cam_fd_cdm_write_reg_val_pair(
+				reg_val_pair, num_cmds,
+				hw_static_info->core_regs.image_addr,
+				io_buf->io_addr[0]);
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Invalid resource type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < CAM_FD_MAX_IO_BUFFERS; i++) {
+		io_buf = &prestart_args->output_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+			CAM_ERR(CAM_FD, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
+			return -EINVAL;
+		}
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_FD_OUTPUT_PORT_ID_RESULTS: {
+			uint32_t face_results_offset;
+
+			size_required = hw_static_info->results.max_faces *
+				hw_static_info->results.per_face_entries * 4;
+
+			if (io_buf->io_cfg->planes[0].plane_stride <
+				size_required) {
+				CAM_ERR(CAM_FD, "Invalid results size %d %d",
+					io_buf->io_cfg->planes[0].plane_stride,
+					size_required);
+				return -EINVAL;
+			}
+
+			req_private->fd_results =
+				(struct cam_fd_results *)io_buf->cpu_addr[0];
+
+			face_results_offset =
+				(uint8_t *)&req_private->fd_results->faces[0] -
+				(uint8_t *)req_private->fd_results;
+
+			if (hw_static_info->ro_mode_supported) {
+				if ((num_cmds + 4) > CAM_FD_REG_VAL_PAIR_SIZE) {
+					CAM_ERR(CAM_FD,
+						"Invalid reg_val size %d, %d",
+						num_cmds,
+						CAM_FD_REG_VAL_PAIR_SIZE);
+					return -EINVAL;
+				}
+				/*
+				 * Face data actually starts 16 bytes later in
+				 * the io buffer. Check cam_fd_results.
+				 */
+				num_cmds = cam_fd_cdm_write_reg_val_pair(
+					reg_val_pair, num_cmds,
+					hw_static_info->core_regs.result_addr,
+					io_buf->io_addr[0] +
+					face_results_offset);
+				num_cmds = cam_fd_cdm_write_reg_val_pair(
+					reg_val_pair, num_cmds,
+					hw_static_info->core_regs.ro_mode,
+					0x1);
+
+				req_private->ro_mode_enabled = true;
+			} else {
+				req_private->ro_mode_enabled = false;
+			}
+			break;
+		}
+		case CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS: {
+			size_required =
+				hw_static_info->results.raw_results_entries *
+				sizeof(uint32_t);
+
+			if (io_buf->io_cfg->planes[0].plane_stride <
+				size_required) {
+				CAM_ERR(CAM_FD, "Invalid results size %d %d",
+					io_buf->io_cfg->planes[0].plane_stride,
+					size_required);
+				return -EINVAL;
+			}
+
+			req_private->raw_results =
+				(uint32_t *)io_buf->cpu_addr[0];
+			break;
+		}
+		case CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER: {
+			if ((num_cmds + 2) > CAM_FD_REG_VAL_PAIR_SIZE) {
+				CAM_ERR(CAM_FD,
+					"Invalid reg_val pair size %d, %d",
+					num_cmds, CAM_FD_REG_VAL_PAIR_SIZE);
+				return -EINVAL;
+			}
+
+			num_cmds = cam_fd_cdm_write_reg_val_pair(
+				reg_val_pair, num_cmds,
+				hw_static_info->core_regs.work_addr,
+				io_buf->io_addr[0]);
+
+			work_buffer_configured = true;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FD, "Invalid resource type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	if (!req_private->fd_results || !work_buffer_configured) {
+		CAM_ERR(CAM_FD, "Invalid IO Buffers results=%pK work=%d",
+			req_private->fd_results, work_buffer_configured);
+		return -EINVAL;
+	}
+
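+	/*
+	 * Layout of the KMD command buffer assembled below:
+	 *   pre-config  : [CHANGE_BASE][REG_RANDOM: control + buffer setup]
+	 *   post-config : [REG_RANDOM: start trigger (control = 0x2)]
+	 */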
+	/* First insert CHANGE_BASE command */
+	size = ctx_hw_private->cdm_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "buf size:%d is not sufficient, expected: %d",
+			prestart_args->size, size);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info,
+		((struct cam_fd_soc_private *)soc_info->soc_private)->
+		regbase_index[CAM_FD_REG_CORE]);
+
+	ctx_hw_private->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+		num_cmds/2);
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "Insufficient size:%d , expected size:%d",
+			available_size, size);
+		return -ENOMEM;
+	}
+	ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	/* Update pre_config_buf_size in bytes */
+	prestart_args->pre_config_buf_size =
+		prestart_args->size - available_size;
+
+	/* Insert start trigger command into CDM as post config commands. */
+	num_cmds = cam_fd_cdm_write_reg_val_pair(reg_val_pair, 0,
+		hw_static_info->core_regs.control, 0x2);
+	size = ctx_hw_private->cdm_ops->cdm_required_size_reg_random(
+		num_cmds/2);
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_FD, "Insufficient size:%d , expected size:%d",
+			available_size, size);
+		return -ENOMEM;
+	}
+	ctx_hw_private->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmds/2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	prestart_args->post_config_buf_size = size * 4;
+
+	CAM_DBG(CAM_FD, "PreConfig [%pK %d], PostConfig[%pK %d]",
+		prestart_args->cmd_buf_addr, prestart_args->pre_config_buf_size,
+		cmd_buf_addr, prestart_args->post_config_buf_size);
+
+	for (i = 0; i < (prestart_args->pre_config_buf_size +
+		prestart_args->post_config_buf_size) / 4; i++)
+		CAM_DBG(CAM_FD, "CDM KMD Commands [%d] : [%pK] [0x%x]", i,
+			&prestart_args->cmd_buf_addr[i],
+			prestart_args->cmd_buf_addr[i]);
+
+	return 0;
+}
+
+static int cam_fd_hw_util_processcmd_frame_done(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_frame_done_args *frame_done_args)
+{
+	struct cam_fd_core *fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	struct cam_fd_hw_static_info *hw_static_info = fd_core->hw_static_info;
+	struct cam_fd_hw_req_private *req_private;
+	uint32_t base, face_cnt;
+	uint32_t *buffer;
+	unsigned long flags;
+	int i;
+
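+	/*
+	 * Results are readable only when the core is back in IDLE after a
+	 * frame done, with the request saved at start time still valid.
+	 */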
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state != CAM_FD_CORE_STATE_IDLE) ||
+		(fd_core->results_valid == false) ||
+		!fd_core->hw_req_private) {
+		CAM_ERR(CAM_FD,
+			"Invalid state for results state=%d, results=%d %pK",
+			fd_core->core_state, fd_core->results_valid,
+			fd_core->hw_req_private);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+	fd_core->core_state = CAM_FD_CORE_STATE_READING_RESULTS;
+	req_private = fd_core->hw_req_private;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	/*
+	 * Copy the register value as is into the output buffer.
+	 * Whether we copy the output data by reading registers or
+	 * program the output buffer directly to HW must be transparent to UMD.
+	 * In case HW supports writing the face count value directly into
+	 * DDR memory in the future, these values should match.
+	 */
+	req_private->fd_results->face_count =
+		cam_fd_soc_register_read(&fd_hw->soc_info, CAM_FD_REG_CORE,
+		hw_static_info->core_regs.result_cnt);
+
+	face_cnt = req_private->fd_results->face_count & 0x3F;
+
+	if (face_cnt > hw_static_info->results.max_faces) {
+		CAM_WARN(CAM_FD, "Face count greater than max %d %d",
+			face_cnt, hw_static_info->results.max_faces);
+		face_cnt = hw_static_info->results.max_faces;
+	}
+
+	CAM_DBG(CAM_FD, "ReqID[%lld] Faces Detected = %d",
+		req_private->request_id, face_cnt);
+
+	/*
+	 * We need to read the face data information from registers only
+	 * if one of the below is true:
+	 * 1. RO mode is not set, i.e. FD HW doesn't write face data into
+	 *    DDR memory
+	 * 2. On the current chipset, results written into DDR memory by FD HW
+	 *    are not guaranteed to be correct
+	 */
+	if (!req_private->ro_mode_enabled ||
+		hw_static_info->enable_errata_wa.ro_mode_results_invalid) {
+		buffer = (uint32_t *)&req_private->fd_results->faces[0];
+		base = hw_static_info->core_regs.results_reg_base;
+
+		/*
+		 * Write register values as is into the face data buffer. It is
+		 * the UMD driver's responsibility to interpret the data and
+		 * extract face properties from the output buffer. If the
+		 * output buffer were programmed directly to HW, KMD would have
+		 * no way to extract the face properties and UMD would have to
+		 * extract them anyway. So we follow the same approach and keep
+		 * this transparent to UMD.
+		 */
+		for (i = 0;
+			i < (face_cnt *
+			hw_static_info->results.per_face_entries); i++) {
+			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+				CAM_FD_REG_CORE, base + (i * 0x4));
+			CAM_DBG(CAM_FD, "FaceData[%d] : 0x%x", i / 4, *buffer);
+			buffer++;
+		}
+	}
+
+	if (req_private->get_raw_results &&
+		req_private->raw_results &&
+		hw_static_info->results.raw_results_available) {
+		buffer = req_private->raw_results;
+		base = hw_static_info->core_regs.raw_results_reg_base;
+
+		for (i = 0;
+			i < hw_static_info->results.raw_results_entries;
+			i++) {
+			*buffer = cam_fd_soc_register_read(&fd_hw->soc_info,
+				CAM_FD_REG_CORE, base + (i * 0x4));
+			CAM_DBG(CAM_FD, "RawData[%d] : 0x%x", i, *buffer);
+			buffer++;
+		}
+	}
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->hw_req_private = NULL;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return 0;
+}
+
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)data;
+	struct cam_fd_core *fd_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_fd_hw_static_info *hw_static_info;
+	uint32_t reg_value;
+	enum cam_fd_hw_irq_type irq_type = CAM_FD_IRQ_FRAME_DONE;
+	uint32_t num_irqs = 0;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	fd_core = (struct cam_fd_core *) fd_hw->core_info;
+	soc_info = &fd_hw->soc_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	reg_value = cam_fd_soc_register_read(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_status);
+
+	CAM_DBG(CAM_FD, "FD IRQ status 0x%x", reg_value);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.irq_clear,
+		reg_value);
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE)) {
+		complete_all(&fd_core->halt_complete);
+		irq_type = CAM_FD_IRQ_HALT_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE)) {
+		complete_all(&fd_core->reset_complete);
+		irq_type = CAM_FD_IRQ_RESET_DONE;
+		num_irqs++;
+	}
+
+	if (reg_value & CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE)) {
+		complete_all(&fd_core->processing_complete);
+		irq_type = CAM_FD_IRQ_FRAME_DONE;
+		num_irqs++;
+	}
+
+	/*
+	 * We should never get an IRQ callback with zero or more than one mask
+	 * set. Validate first to make sure nothing is going wrong.
+	 */
+	if (num_irqs != 1) {
+		CAM_ERR(CAM_FD,
+			"Invalid number of IRQs, value=0x%x, num_irqs=%d",
+			reg_value, num_irqs);
+		return IRQ_NONE;
+	}
+
+	trace_cam_irq_activated("FD", irq_type);
+
+	if (irq_type == CAM_FD_IRQ_HALT_DONE) {
+		/*
+		 * Do not send HALT IRQ callback to Hw Mgr,
+		 * a reset would always follow
+		 */
+		return IRQ_HANDLED;
+	}
+
+	spin_lock(&fd_core->spin_lock);
+	/* Do not change state to IDLE on HALT IRQ. Reset must follow halt */
+	if ((irq_type == CAM_FD_IRQ_RESET_DONE) ||
+		(irq_type == CAM_FD_IRQ_FRAME_DONE)) {
+
+		fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+		if (irq_type == CAM_FD_IRQ_FRAME_DONE)
+			fd_core->results_valid = true;
+
+		CAM_DBG(CAM_FD, "FD IRQ type %d, state=%d",
+			irq_type, fd_core->core_state);
+	}
+	spin_unlock(&fd_core->spin_lock);
+
+	if (fd_core->irq_cb.cam_fd_hw_mgr_cb)
+		fd_core->irq_cb.cam_fd_hw_mgr_cb(fd_core->irq_cb.data,
+			irq_type);
+
+	return IRQ_HANDLED;
+}
+
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_caps *fd_hw_caps =
+		(struct cam_fd_hw_caps *)get_hw_cap_args;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_FD, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	*fd_hw_caps = fd_core->hw_caps;
+
+	CAM_DBG(CAM_FD, "core:%d.%d wrapper:%d.%d mode:%d, raw:%d",
+		fd_hw_caps->core_version.major,
+		fd_hw_caps->core_version.minor,
+		fd_hw_caps->wrapper_version.major,
+		fd_hw_caps->wrapper_version.minor,
+		fd_hw_caps->supported_modes,
+		fd_hw_caps->raw_results_available);
+
+	return 0;
+}
+
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_init_args *init_args =
+		(struct cam_fd_hw_init_args *)init_hw_args;
+	int rc = 0;
+	unsigned long flags;
+
+	if (!fd_hw || !init_args) {
+		CAM_ERR(CAM_FD, "Invalid argument %pK %pK", fd_hw, init_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_init_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_init_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	mutex_lock(&fd_hw->hw_mutex);
+	CAM_DBG(CAM_FD, "FD HW Init ref count before %d", fd_hw->open_count);
+
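+	/*
+	 * Power up, reset and apply power-on settings only for the first
+	 * opener; subsequent opens only need a CDM stream on for the
+	 * caller's context.
+	 */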
+	if (fd_hw->open_count > 0) {
+		rc = 0;
+		goto cdm_streamon;
+	}
+
+	rc = cam_fd_soc_enable_resources(&fd_hw->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Enable SOC failed, rc=%d", rc);
+		goto unlock_return;
+	}
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	rc = cam_fd_hw_reset(hw_priv, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Reset Failed, rc=%d", rc);
+		goto disable_soc;
+	}
+
+	cam_fd_hw_util_enable_power_on_settings(fd_hw);
+
+cdm_streamon:
+	fd_hw->open_count++;
+	CAM_DBG(CAM_FD, "FD HW Init ref count after %d", fd_hw->open_count);
+
+	if (init_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			init_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_on(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD, "CDM StreamOn fail :handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+			fd_hw->open_count--;
+			if (!fd_hw->open_count)
+				goto disable_soc;
+		}
+	}
+
+	mutex_unlock(&fd_hw->hw_mutex);
+
+	return rc;
+
+disable_soc:
+	if (cam_fd_soc_disable_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Error in disable soc resources");
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+unlock_return:
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = hw_priv;
+	struct cam_fd_core *fd_core = NULL;
+	struct cam_fd_hw_deinit_args *deinit_args =
+		(struct cam_fd_hw_deinit_args *)deinit_hw_args;
+	int rc = 0;
+	unsigned long flags;
+
+	if (!fd_hw || !deinit_hw_args) {
+		CAM_ERR(CAM_FD, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_deinit_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_deinit_args));
+		return -EINVAL;
+	}
+
+	mutex_lock(&fd_hw->hw_mutex);
+	if (fd_hw->open_count == 0) {
+		mutex_unlock(&fd_hw->hw_mutex);
+		CAM_ERR(CAM_FD, "Error Unbalanced deinit");
+		return -EFAULT;
+	}
+
+	fd_hw->open_count--;
+	CAM_DBG(CAM_FD, "FD HW ref count=%d", fd_hw->open_count);
+
+	if (fd_hw->open_count > 0) {
+		rc = 0;
+		goto positive_ref_cnt;
+	}
+
+	rc = cam_fd_soc_disable_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Failed in Disable SOC, rc=%d", rc);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+
+	/* With the ref_cnt correct, this should never happen */
+	WARN_ON(!fd_core);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_POWERDOWN;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+positive_ref_cnt:
+	if (deinit_args->ctx_hw_private) {
+		struct cam_fd_ctx_hw_private *ctx_hw_private =
+			deinit_args->ctx_hw_private;
+
+		rc = cam_cdm_stream_off(ctx_hw_private->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				ctx_hw_private->cdm_handle, rc);
+		}
+	}
+
+	mutex_unlock(&fd_hw->hw_mutex);
+	return rc;
+}
+
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_hw_soc_info *soc_info;
+	unsigned long flags;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+	soc_info = &fd_hw->soc_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
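+	/* Keep clock gating disabled across the halt + reset sequence */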
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x1);
+
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x0);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_fd_hw_cmd_start_args *start_args =
+		(struct cam_fd_hw_cmd_start_args *)hw_start_args;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	unsigned long flags;
+	int rc;
+
+	if (!hw_priv || !start_args) {
+		CAM_ERR(CAM_FD, "Invalid input args %pK %pK", hw_priv,
+			start_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_cmd_start_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_cmd_start_args));
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if (fd_core->core_state != CAM_FD_CORE_STATE_IDLE) {
+		CAM_ERR(CAM_FD, "Cannot start in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	/*
+	 * We are about to start FD HW processing, save the request
+	 * private data which is being processed by HW. Once the frame
+	 * processing is finished, process_cmd(FRAME_DONE) should be called
+	 * with same hw_req_private as input.
+	 */
+	fd_core->hw_req_private = start_args->hw_req_private;
+	fd_core->core_state = CAM_FD_CORE_STATE_PROCESSING;
+	fd_core->results_valid = false;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	ctx_hw_private = start_args->ctx_hw_private;
+
+	/* Before starting HW processing, reinitialize processing_complete */
+	reinit_completion(&fd_core->processing_complete);
+
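+	/*
+	 * Errata workaround: this HW can service only a single IRQ at a
+	 * time, so restrict the wrapper IRQ mask to FRAME_DONE while a
+	 * frame is being processed.
+	 */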
+	if (hw_static_info->enable_errata_wa.single_irq_only) {
+		cam_fd_soc_register_write(&fd_hw->soc_info, CAM_FD_REG_WRAPPER,
+			hw_static_info->wrapper_regs.irq_mask,
+			CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE));
+	}
+
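+	/*
+	 * Package the prepared HW update entries into one CDM BL request
+	 * and submit it, so the CDM programs the FD core for this frame.
+	 */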
+	if (start_args->num_hw_update_entries > 0) {
+		struct cam_cdm_bl_request *cdm_cmd = ctx_hw_private->cdm_cmd;
+		struct cam_hw_update_entry *cmd;
+		int i;
+
+		cdm_cmd->cmd_arrary_count = start_args->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i < start_args->num_hw_update_entries; i++) {
+			cmd = (start_args->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(ctx_hw_private->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_FD,
+				"Failed to submit cdm commands, rc=%d", rc);
+			goto error;
+		}
+	} else {
+		CAM_ERR(CAM_FD, "Invalid number of hw update entries");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	return 0;
+error:
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_fd_core *fd_core;
+	struct cam_fd_hw_static_info *hw_static_info;
+	struct cam_hw_soc_info *soc_info;
+	unsigned long flags;
+	int rc;
+
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid input handle");
+		return -EINVAL;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	hw_static_info = fd_core->hw_static_info;
+	soc_info = &fd_hw->soc_info;
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	if ((fd_core->core_state == CAM_FD_CORE_STATE_POWERDOWN) ||
+		(fd_core->core_state == CAM_FD_CORE_STATE_RESET_PROGRESS)) {
+		CAM_ERR(CAM_FD, "Reset not allowed in %d state",
+			fd_core->core_state);
+		spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+		return -EINVAL;
+	}
+
+	fd_core->results_valid = false;
+	fd_core->core_state = CAM_FD_CORE_STATE_RESET_PROGRESS;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x1);
+
+	rc = cam_fd_hw_util_fdwrapper_halt(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in HALT rc=%d", rc);
+		return rc;
+	}
+
+	/* HALT must be followed by RESET */
+	rc = cam_fd_hw_util_fdwrapper_sync_reset(fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in RESET rc=%d", rc);
+		return rc;
+	}
+
+	cam_fd_soc_register_write(soc_info, CAM_FD_REG_WRAPPER,
+		hw_static_info->wrapper_regs.cgc_disable, 0x0);
+
+	spin_lock_irqsave(&fd_core->spin_lock, flags);
+	fd_core->core_state = CAM_FD_CORE_STATE_IDLE;
+	spin_unlock_irqrestore(&fd_core->spin_lock, flags);
+
+	return rc;
+}
+
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_reserve_args *reserve_args =
+		(struct cam_fd_hw_reserve_args *)hw_reserve_args;
+	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_cdm_bl_request *cdm_cmd;
+	int i;
+
+	if (!fd_hw || !reserve_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, reserve_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_reserve_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_reserve_args));
+		return -EINVAL;
+	}
+
+	cdm_cmd = kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_FD_MAX_HW_ENTRIES - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!cdm_cmd)
+		return -ENOMEM;
+
+	ctx_hw_private = kzalloc(sizeof(struct cam_fd_ctx_hw_private),
+		GFP_KERNEL);
+	if (!ctx_hw_private) {
+		kfree(cdm_cmd);
+		return -ENOMEM;
+	}
+
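+	/*
+	 * Acquire a virtual CDM for this context; all register
+	 * programming for this context is submitted through this handle.
+	 */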
+	memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+	strlcpy(cdm_acquire.identifier, "fd", sizeof("fd"));
+	cdm_acquire.cell_index = fd_hw->soc_info.index;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ctx_hw_private;
+	cdm_acquire.cam_cdm_callback = cam_fd_hw_util_cdm_callback;
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.base_array_cnt = fd_hw->soc_info.num_reg_map;
+	for (i = 0; i < fd_hw->soc_info.num_reg_map; i++)
+		cdm_acquire.base_array[i] = &fd_hw->soc_info.reg_map[i];
+
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to acquire the CDM HW");
+		goto error;
+	}
+
+	ctx_hw_private->hw_ctx = reserve_args->hw_ctx;
+	ctx_hw_private->fd_hw = fd_hw;
+	ctx_hw_private->mode = reserve_args->mode;
+	ctx_hw_private->cdm_handle = cdm_acquire.handle;
+	ctx_hw_private->cdm_ops = cdm_acquire.ops;
+	ctx_hw_private->cdm_cmd = cdm_cmd;
+
+	reserve_args->ctx_hw_private = ctx_hw_private;
+
+	CAM_DBG(CAM_FD, "private=%pK, hw_ctx=%pK, mode=%d, cdm_handle=0x%x",
+		ctx_hw_private, ctx_hw_private->hw_ctx, ctx_hw_private->mode,
+		ctx_hw_private->cdm_handle);
+
+	return 0;
+error:
+	kfree(ctx_hw_private);
+	kfree(cdm_cmd);
+	return rc;
+}
+
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+	struct cam_fd_ctx_hw_private *ctx_hw_private;
+	struct cam_fd_hw_release_args *release_args =
+		(struct cam_fd_hw_release_args *)hw_release_args;
+
+	if (!fd_hw || !release_args) {
+		CAM_ERR(CAM_FD, "Invalid input %pK, %pK", fd_hw, release_args);
+		return -EINVAL;
+	}
+
+	if (arg_size != sizeof(struct cam_fd_hw_release_args)) {
+		CAM_ERR(CAM_FD, "Invalid arg size %u, %zu", arg_size,
+			sizeof(struct cam_fd_hw_release_args));
+		return -EINVAL;
+	}
+
+	ctx_hw_private =
+		(struct cam_fd_ctx_hw_private *)release_args->ctx_hw_private;
+
+	rc = cam_cdm_release(ctx_hw_private->cdm_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Release cdm handle failed, handle=0x%x, rc=%d",
+			ctx_hw_private->cdm_handle, rc);
+
+	kfree(ctx_hw_private->cdm_cmd);
+	kfree(ctx_hw_private);
+	release_args->ctx_hw_private = NULL;
+
+	return 0;
+}
+
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *fd_hw = (struct cam_hw_info *)hw_priv;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !cmd_args ||
+		(cmd_type >= CAM_FD_HW_CMD_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid arguments %pK %pK %d", hw_priv,
+			cmd_args, cmd_type);
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_FD_HW_CMD_REGISTER_CALLBACK: {
+		struct cam_fd_hw_cmd_set_irq_cb *irq_cb_args;
+		struct cam_fd_core *fd_core =
+			(struct cam_fd_core *)fd_hw->core_info;
+
+		if (sizeof(struct cam_fd_hw_cmd_set_irq_cb) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		irq_cb_args = (struct cam_fd_hw_cmd_set_irq_cb *)cmd_args;
+		fd_core->irq_cb.cam_fd_hw_mgr_cb =
+			irq_cb_args->cam_fd_hw_mgr_cb;
+		fd_core->irq_cb.data = irq_cb_args->data;
+		rc = 0;
+		break;
+	}
+	case CAM_FD_HW_CMD_PRESTART: {
+		struct cam_fd_hw_cmd_prestart_args *prestart_args;
+
+		if (sizeof(struct cam_fd_hw_cmd_prestart_args) != arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		prestart_args = (struct cam_fd_hw_cmd_prestart_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_prestart(fd_hw, prestart_args);
+		break;
+	}
+	case CAM_FD_HW_CMD_FRAME_DONE: {
+		struct cam_fd_hw_frame_done_args *cmd_frame_results;
+
+		if (sizeof(struct cam_fd_hw_frame_done_args) !=
+			arg_size) {
+			CAM_ERR(CAM_FD, "cmd_type %d, size mismatch %d",
+				cmd_type, arg_size);
+			break;
+		}
+
+		cmd_frame_results =
+			(struct cam_fd_hw_frame_done_args *)cmd_args;
+		rc = cam_fd_hw_util_processcmd_frame_done(fd_hw,
+			cmd_frame_results);
+		break;
+	}
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
new file mode 100644
index 0000000..f960b2d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_core.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_CORE_H_
+#define _CAM_FD_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_fd.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_soc.h"
+
+#define CAM_FD_IRQ_TO_MASK(irq)        (1 << (irq))
+#define CAM_FD_MASK_TO_IRQ(mask, irq)  ((mask) >> (irq))
+
+#define CAM_FD_HW_HALT_RESET_TIMEOUT   750
+
+/**
+ * enum cam_fd_core_state - FD Core internal states
+ *
+ * @CAM_FD_CORE_STATE_POWERDOWN       : Indicates FD core is powered down
+ * @CAM_FD_CORE_STATE_IDLE            : Indicates FD HW is in idle state.
+ *                                      Core can be in this state when it is
+ *                                      ready to process frames or when
+ *                                      processing is finished and results are
+ *                                      available
+ * @CAM_FD_CORE_STATE_PROCESSING      : Indicates FD core is processing frame
+ * @CAM_FD_CORE_STATE_READING_RESULTS : Indicates results are being read from
+ *                                      FD core
+ * @CAM_FD_CORE_STATE_RESET_PROGRESS  : Indicates FD core is in reset state
+ */
+enum cam_fd_core_state {
+	CAM_FD_CORE_STATE_POWERDOWN,
+	CAM_FD_CORE_STATE_IDLE,
+	CAM_FD_CORE_STATE_PROCESSING,
+	CAM_FD_CORE_STATE_READING_RESULTS,
+	CAM_FD_CORE_STATE_RESET_PROGRESS,
+};
+
+/**
+ * struct cam_fd_ctx_hw_private : HW private information for a specific hw ctx.
+ *                                This information is populated by the HW layer
+ *                                on reserve() and given back to the HW Mgr as
+ *                                private data for the hw context. This private
+ *                                data has to be passed back by the HW Mgr
+ *                                layer in all further HW layer calls
+ *
+ * @hw_ctx           : Corresponding hw_ctx pointer
+ * @fd_hw            : FD HW info pointer
+ * @cdm_handle       : CDM Handle for this context
+ * @cdm_ops          : CDM Ops
+ * @cdm_cmd          : CDM command pointer
+ * @mode             : Mode this context is running
+ * @curr_req_private : Current Request information
+ *
+ */
+struct cam_fd_ctx_hw_private {
+	void                          *hw_ctx;
+	struct cam_hw_info            *fd_hw;
+	uint32_t                       cdm_handle;
+	struct cam_cdm_utils_ops      *cdm_ops;
+	struct cam_cdm_bl_request     *cdm_cmd;
+	enum cam_fd_hw_mode            mode;
+	struct cam_fd_hw_req_private  *curr_req_private;
+};
+
+/**
+ * struct cam_fd_core_regs : FD HW Core register offsets info
+ *
+ * @version              : Offset of version register
+ * @control              : Offset of control register
+ * @result_cnt           : Offset of result count register
+ * @result_addr          : Offset of results address register
+ * @image_addr           : Offset of image address register
+ * @work_addr            : Offset of work address register
+ * @ro_mode              : Offset of ro_mode register
+ * @results_reg_base     : Offset of results_reg_base register
+ * @raw_results_reg_base : Offset of raw_results_reg_base register
+ *
+ */
+struct cam_fd_core_regs {
+	uint32_t       version;
+	uint32_t       control;
+	uint32_t       result_cnt;
+	uint32_t       result_addr;
+	uint32_t       image_addr;
+	uint32_t       work_addr;
+	uint32_t       ro_mode;
+	uint32_t       results_reg_base;
+	uint32_t       raw_results_reg_base;
+};
+
+/**
+ * struct cam_fd_wrapper_regs : FD HW Wrapper register offsets info
+ *
+ * @wrapper_version     : Offset of wrapper_version register
+ * @cgc_disable         : Offset of cgc_disable register
+ * @hw_stop             : Offset of hw_stop register
+ * @sw_reset            : Offset of sw_reset register
+ * @vbif_req_priority   : Offset of vbif_req_priority register
+ * @vbif_priority_level : Offset of vbif_priority_level register
+ * @vbif_done_status    : Offset of vbif_done_status register
+ * @irq_mask            : Offset of irq mask register
+ * @irq_status          : Offset of irq status register
+ * @irq_clear           : Offset of irq clear register
+ *
+ */
+struct cam_fd_wrapper_regs {
+	uint32_t       wrapper_version;
+	uint32_t       cgc_disable;
+	uint32_t       hw_stop;
+	uint32_t       sw_reset;
+	uint32_t       vbif_req_priority;
+	uint32_t       vbif_priority_level;
+	uint32_t       vbif_done_status;
+	uint32_t       irq_mask;
+	uint32_t       irq_status;
+	uint32_t       irq_clear;
+};
+
+/**
+ * struct cam_fd_hw_errata_wa : FD HW Errata workaround enable/disable info
+ *
+ * @single_irq_only         : Whether to enable only one irq at any time
+ * @ro_mode_enable_always   : Whether to enable ro mode always
+ * @ro_mode_results_invalid : Whether results written directly into output
+ *                            memory by HW are valid or not
+ */
+struct cam_fd_hw_errata_wa {
+	bool   single_irq_only;
+	bool   ro_mode_enable_always;
+	bool   ro_mode_results_invalid;
+};
+
+/**
+ * struct cam_fd_hw_results_prop : FD HW Results properties
+ *
+ * @max_faces             : Maximum number of faces supported
+ * @per_face_entries      : Number of registers with properties for each face
+ * @raw_results_entries   : Number of raw results entries for the full search
+ * @raw_results_available : Whether raw results available on this HW
+ *
+ */
+struct cam_fd_hw_results_prop {
+	uint32_t       max_faces;
+	uint32_t       per_face_entries;
+	uint32_t       raw_results_entries;
+	bool           raw_results_available;
+};
+
+/**
+ * struct cam_fd_hw_static_info : FD HW information based on HW version
+ *
+ * @core_version       : Core version of FD HW
+ * @wrapper_version    : Wrapper version of FD HW
+ * @core_regs          : Register offset information for core registers
+ * @wrapper_regs       : Register offset information for wrapper registers
+ * @results            : Information about results available on this HW
+ * @enable_errata_wa   : Errata workaround information
+ * @irq_mask           : IRQ mask to enable
+ * @qos_priority       : QoS priority setting for this chipset
+ * @qos_priority_level : QoS priority level setting for this chipset
+ * @supported_modes    : Supported HW modes on this HW version
+ * @ro_mode_supported  : Whether RO mode is supported on this HW
+ *
+ */
+struct cam_fd_hw_static_info {
+	struct cam_hw_version          core_version;
+	struct cam_hw_version          wrapper_version;
+	struct cam_fd_core_regs        core_regs;
+	struct cam_fd_wrapper_regs     wrapper_regs;
+	struct cam_fd_hw_results_prop  results;
+	struct cam_fd_hw_errata_wa     enable_errata_wa;
+	uint32_t                       irq_mask;
+	uint32_t                       qos_priority;
+	uint32_t                       qos_priority_level;
+	uint32_t                       supported_modes;
+	bool                           ro_mode_supported;
+};
+
+/**
+ * struct cam_fd_core : FD HW core data structure
+ *
+ * @hw_static_info      : HW information specific to version
+ * @hw_caps             : HW capabilities
+ * @core_state          : Current HW state
+ * @processing_complete : Whether processing is complete
+ * @reset_complete      : Whether reset is complete
+ * @halt_complete       : Whether halt is complete
+ * @hw_req_private      : Request that is being currently processed by HW
+ * @results_valid       : Whether HW frame results are available to get
+ * @spin_lock           : Spinlock to protect shared data in hw layer
+ * @irq_cb              : HW Manager callback information
+ *
+ */
+struct cam_fd_core {
+	struct cam_fd_hw_static_info   *hw_static_info;
+	struct cam_fd_hw_caps           hw_caps;
+	enum cam_fd_core_state          core_state;
+	struct completion               processing_complete;
+	struct completion               reset_complete;
+	struct completion               halt_complete;
+	struct cam_fd_hw_req_private   *hw_req_private;
+	bool                            results_valid;
+	spinlock_t                      spin_lock;
+	struct cam_fd_hw_cmd_set_irq_cb irq_cb;
+};
+
+int cam_fd_hw_util_get_hw_caps(struct cam_hw_info *fd_hw,
+	struct cam_fd_hw_caps *hw_caps);
+irqreturn_t cam_fd_hw_irq(int irq_num, void *data);
+
+int cam_fd_hw_get_hw_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size);
+int cam_fd_hw_init(void *hw_priv, void *init_hw_args, uint32_t arg_size);
+int cam_fd_hw_deinit(void *hw_priv, void *deinit_hw_args, uint32_t arg_size);
+int cam_fd_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_fd_hw_reserve(void *hw_priv, void *hw_reserve_args, uint32_t arg_size);
+int cam_fd_hw_release(void *hw_priv, void *hw_release_args, uint32_t arg_size);
+int cam_fd_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size);
+int cam_fd_hw_halt_reset(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_fd_hw_read(void *hw_priv, void *read_args, uint32_t arg_size);
+int cam_fd_hw_write(void *hw_priv, void *write_args, uint32_t arg_size);
+int cam_fd_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+#endif /* _CAM_FD_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c
new file mode 100644
index 0000000..c88812f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_dev.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_fd_hw_intf.h"
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+#include "cam_fd_hw_v41.h"
+#include "cam_fd_hw_v501.h"
+
+static char fd_dev_name[8];
+
+static int cam_fd_hw_dev_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *fd_hw;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_fd_core *fd_core;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_fd_hw_static_info *hw_static_info = NULL;
+	int rc = 0;
+	uint32_t hw_idx = 0;
+	struct cam_fd_hw_init_args init_args;
+	struct cam_fd_hw_deinit_args deinit_args;
+
+	fd_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!fd_hw_intf)
+		return -ENOMEM;
+
+	fd_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!fd_hw) {
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
+
+	fd_core = kzalloc(sizeof(struct cam_fd_core), GFP_KERNEL);
+	if (!fd_core) {
+		kfree(fd_hw);
+		kfree(fd_hw_intf);
+		return -ENOMEM;
+	}
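+
+	/* cell-index from DT identifies this FD HW instance */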
+	of_property_read_u32(pdev->dev.of_node,
+			"cell-index", &hw_idx);
+
+	fd_hw_intf->hw_priv = fd_hw;
+	fd_hw->core_info = fd_core;
+	fd_hw_intf->hw_idx = hw_idx;
+
+	memset(fd_dev_name, 0, sizeof(fd_dev_name));
+	snprintf(fd_dev_name, sizeof(fd_dev_name),
+		"fd%1u", fd_hw_intf->hw_idx);
+
+	fd_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	fd_hw->soc_info.pdev = pdev;
+	fd_hw->soc_info.dev = &pdev->dev;
+	fd_hw->soc_info.dev_name = fd_dev_name;
+	fd_hw->open_count = 0;
+	mutex_init(&fd_hw->hw_mutex);
+	spin_lock_init(&fd_hw->hw_lock);
+	init_completion(&fd_hw->hw_complete);
+
+	spin_lock_init(&fd_core->spin_lock);
+	init_completion(&fd_core->processing_complete);
+	init_completion(&fd_core->halt_complete);
+	init_completion(&fd_core->reset_complete);
+
+	fd_hw_intf->hw_ops.get_hw_caps = cam_fd_hw_get_hw_caps;
+	fd_hw_intf->hw_ops.init = cam_fd_hw_init;
+	fd_hw_intf->hw_ops.deinit = cam_fd_hw_deinit;
+	fd_hw_intf->hw_ops.reset = cam_fd_hw_reset;
+	fd_hw_intf->hw_ops.reserve = cam_fd_hw_reserve;
+	fd_hw_intf->hw_ops.release = cam_fd_hw_release;
+	fd_hw_intf->hw_ops.start = cam_fd_hw_start;
+	fd_hw_intf->hw_ops.stop = cam_fd_hw_halt_reset;
+	fd_hw_intf->hw_ops.read = NULL;
+	fd_hw_intf->hw_ops.write = NULL;
+	fd_hw_intf->hw_ops.process_cmd = cam_fd_hw_process_cmd;
+	fd_hw_intf->hw_type = CAM_HW_FD;
+
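+	/*
+	 * Pick the version-specific static info (register offsets,
+	 * errata workarounds, capabilities) from the matched DT entry.
+	 */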
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev || !match_dev->data) {
+		CAM_ERR(CAM_FD, "No Of_match data, %pK", match_dev);
+		rc = -EINVAL;
+		goto free_memory;
+	}
+	hw_static_info = (struct cam_fd_hw_static_info *)match_dev->data;
+	fd_core->hw_static_info = hw_static_info;
+
+	CAM_DBG(CAM_FD, "HW Static Info : version core[%d.%d] wrapper[%d.%d]",
+		hw_static_info->core_version.major,
+		hw_static_info->core_version.minor,
+		hw_static_info->wrapper_version.major,
+		hw_static_info->wrapper_version.minor);
+
+	rc = cam_fd_soc_init_resources(&fd_hw->soc_info, cam_fd_hw_irq, fd_hw);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to init soc, rc=%d", rc);
+		goto free_memory;
+	}
+
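+	/*
+	 * Power the HW up once during probe to read its capabilities,
+	 * then power it back down until a context acquires it.
+	 */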
+	memset(&init_args, 0x0, sizeof(init_args));
+	memset(&deinit_args, 0x0, sizeof(deinit_args));
+	rc = cam_fd_hw_init(fd_hw, &init_args, sizeof(init_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to hw init, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	rc = cam_fd_hw_util_get_hw_caps(fd_hw, &fd_core->hw_caps);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to get hw caps, rc=%d", rc);
+		goto deinit_hw;
+	}
+
+	rc = cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args));
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed to deinit hw, rc=%d", rc);
+		goto deinit_platform_res;
+	}
+
+	platform_set_drvdata(pdev, fd_hw_intf);
+	CAM_DBG(CAM_FD, "FD-%d probe successful", fd_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_hw:
+	if (cam_fd_hw_deinit(fd_hw, &deinit_args, sizeof(deinit_args)))
+		CAM_ERR(CAM_FD, "Failed in hw deinit");
+deinit_platform_res:
+	if (cam_fd_soc_deinit_resources(&fd_hw->soc_info))
+		CAM_ERR(CAM_FD, "Failed in soc deinit");
+	mutex_destroy(&fd_hw->hw_mutex);
+free_memory:
+	kfree(fd_hw);
+	kfree(fd_hw_intf);
+	kfree(fd_core);
+
+	return rc;
+}
+
+static int cam_fd_hw_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_intf *fd_hw_intf;
+	struct cam_hw_info *fd_hw;
+	struct cam_fd_core *fd_core;
+
+	fd_hw_intf = platform_get_drvdata(pdev);
+	if (!fd_hw_intf) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw_intf from pdev");
+		return -EINVAL;
+	}
+
+	fd_hw = fd_hw_intf->hw_priv;
+	if (!fd_hw) {
+		CAM_ERR(CAM_FD, "Invalid fd_hw from fd_hw_intf");
+		rc = -ENODEV;
+		goto free_fd_hw_intf;
+	}
+
+	fd_core = (struct cam_fd_core *)fd_hw->core_info;
+	if (!fd_core) {
+		CAM_ERR(CAM_FD, "Invalid fd_core from fd_hw");
+		rc = -EINVAL;
+		goto deinit_platform_res;
+	}
+
+	kfree(fd_core);
+
+deinit_platform_res:
+	rc = cam_fd_soc_deinit_resources(&fd_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "Error in FD soc deinit, rc=%d", rc);
+
+	mutex_destroy(&fd_hw->hw_mutex);
+	kfree(fd_hw);
+
+free_fd_hw_intf:
+	kfree(fd_hw_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_fd_hw_dt_match[] = {
+	{
+		.compatible = "qcom,fd41",
+		.data = &cam_fd_wrapper120_core410_info,
+	},
+	{
+		.compatible = "qcom,fd501",
+		.data = &cam_fd_wrapper200_core501_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_fd_hw_dt_match);
+
+static struct platform_driver cam_fd_hw_driver = {
+	.probe = cam_fd_hw_dev_probe,
+	.remove = cam_fd_hw_dev_remove,
+	.driver = {
+		.name = "cam_fd_hw",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_fd_hw_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_fd_hw_init_module(void)
+{
+	return platform_driver_register(&cam_fd_hw_driver);
+}
+
+static void __exit cam_fd_hw_exit_module(void)
+{
+	platform_driver_unregister(&cam_fd_hw_driver);
+}
+
+module_init(cam_fd_hw_init_module);
+module_exit(cam_fd_hw_exit_module);
+MODULE_DESCRIPTION("CAM FD HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
new file mode 100644
index 0000000..911903e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_intf.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_INTF_H_
+#define _CAM_FD_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_fd.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+#define CAM_FD_MAX_IO_BUFFERS        5
+#define CAM_FD_MAX_HW_ENTRIES        5
+
+/**
+ * enum cam_fd_hw_type - Enum for FD HW type
+ *
+ * @CAM_HW_FD : FaceDetection HW type
+ */
+enum cam_fd_hw_type {
+	CAM_HW_FD,
+};
+
+/**
+ * enum cam_fd_hw_mode - Mode in which HW can run
+ *
+ * @CAM_FD_MODE_FACEDETECTION : Face Detection mode in which face search
+ *                              is done on the given frame
+ * @CAM_FD_MODE_PYRAMID       : Pyramid mode where a pyramid image is generated
+ *                              from an input image
+ */
+enum cam_fd_hw_mode {
+	CAM_FD_MODE_FACEDETECTION    = 0x1,
+	CAM_FD_MODE_PYRAMID          = 0x2,
+};
+
+/**
+ * enum cam_fd_priority - FD priority levels
+ *
+ * @CAM_FD_PRIORITY_HIGH   : Indicates high priority client; driver prioritizes
+ *                           frame requests coming from contexts with HIGH
+ *                           priority compared to contexts with normal priority
+ * @CAM_FD_PRIORITY_NORMAL : Indicates normal priority client
+ */
+enum cam_fd_priority {
+	CAM_FD_PRIORITY_HIGH         = 0x0,
+	CAM_FD_PRIORITY_NORMAL,
+};
+
+/**
+ * enum cam_fd_hw_irq_type - FD HW IRQ types
+ *
+ * @CAM_FD_IRQ_FRAME_DONE : Indicates frame processing is finished
+ * @CAM_FD_IRQ_HALT_DONE  : Indicates HW halt is finished
+ * @CAM_FD_IRQ_RESET_DONE : Indicates HW reset is finished
+ */
+enum cam_fd_hw_irq_type {
+	CAM_FD_IRQ_FRAME_DONE,
+	CAM_FD_IRQ_HALT_DONE,
+	CAM_FD_IRQ_RESET_DONE,
+};
+
+/**
+ * enum cam_fd_hw_cmd_type - FD HW layer custom commands
+ *
+ * @CAM_FD_HW_CMD_PRESTART          : Command to process pre-start settings
+ * @CAM_FD_HW_CMD_FRAME_DONE        : Command to process frame done settings
+ * @CAM_FD_HW_CMD_UPDATE_SOC        : Command to process soc update
+ * @CAM_FD_HW_CMD_REGISTER_CALLBACK : Command to set hw mgr callback
+ * @CAM_FD_HW_CMD_MAX               : Indicates max cmd
+ */
+enum cam_fd_hw_cmd_type {
+	CAM_FD_HW_CMD_PRESTART,
+	CAM_FD_HW_CMD_FRAME_DONE,
+	CAM_FD_HW_CMD_UPDATE_SOC,
+	CAM_FD_HW_CMD_REGISTER_CALLBACK,
+	CAM_FD_HW_CMD_MAX,
+};
+
+/**
+ * struct cam_fd_hw_io_buffer : FD HW IO Buffer information
+ *
+ * @valid    : Whether this IO Buf configuration is valid
+ * @io_cfg   : IO Configuration information
+ * @num_buf  : Number of planes in the io_addr, cpu_addr arrays
+ * @io_addr  : Array of IO address information for planes
+ * @cpu_addr : Array of CPU address information for planes
+ */
+struct cam_fd_hw_io_buffer {
+	bool                   valid;
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t               num_buf;
+	uint64_t               io_addr[CAM_PACKET_MAX_PLANES];
+	uintptr_t              cpu_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_fd_hw_req_private : FD HW layer's private information
+ *               specific to a request
+ *
+ * @ctx_hw_private  : FD HW layer's ctx specific private data
+ * @request_id      : Request ID corresponding to this private information
+ * @get_raw_results : Whether to get raw results for this request
+ * @ro_mode_enabled : Whether RO mode is enabled for this request
+ * @fd_results      : Pointer to save face detection results
+ * @raw_results     : Pointer to save face detection raw results
+ */
+struct cam_fd_hw_req_private {
+	void                  *ctx_hw_private;
+	uint64_t               request_id;
+	bool                   get_raw_results;
+	bool                   ro_mode_enabled;
+	struct cam_fd_results *fd_results;
+	uint32_t              *raw_results;
+};
+
+/**
+ * struct cam_fd_hw_reserve_args : Reserve args for this HW context
+ *
+ * @hw_ctx         : HW context for which reserve is requested
+ * @mode           : Mode for which this reserve is requested
+ * @ctx_hw_private : Pointer to save HW layer's private information specific
+ *                   to this hw context. This has to be passed in all further
+ *                   HW layer calls
+ */
+struct cam_fd_hw_reserve_args {
+	void                  *hw_ctx;
+	enum cam_fd_hw_mode    mode;
+	void                  *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_release_args : Release args for this HW context
+ *
+ * @hw_ctx         : HW context for which release is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_release_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_init_args : Init args for this HW context
+ *
+ * @hw_ctx         : HW context for which init is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_init_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_deinit_args : Deinit args for this HW context
+ *
+ * @hw_ctx         : HW context for which deinit is requested
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_deinit_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_prestart_args : Prestart command args
+ *
+ * @hw_ctx               : HW context which submitted this prestart
+ * @ctx_hw_private       : HW layer's private information specific to
+ *                         this hw context
+ * @request_id           : Request ID corresponding to this pre-start command
+ * @get_raw_results      : Whether to get raw results for this request
+ * @input_buf            : Input IO Buffer information for this request
+ * @output_buf           : Output IO Buffer information for this request
+ * @cmd_buf_addr         : Command buffer address to fill kmd commands
+ * @size                 : Size available in command buffer
+ * @pre_config_buf_size  : Size of the commands filled by KMD that have to
+ *                         be inserted before the umd commands
+ * @post_config_buf_size : Size of the commands filled by KMD that have to
+ *                         be inserted after the umd commands
+ * @hw_req_private       : HW layer's private information specific to
+ *                         this request
+ */
+struct cam_fd_hw_cmd_prestart_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	bool                          get_raw_results;
+	struct cam_fd_hw_io_buffer    input_buf[CAM_FD_MAX_IO_BUFFERS];
+	struct cam_fd_hw_io_buffer    output_buf[CAM_FD_MAX_IO_BUFFERS];
+	uint32_t                     *cmd_buf_addr;
+	uint32_t                      size;
+	uint32_t                      pre_config_buf_size;
+	uint32_t                      post_config_buf_size;
+	struct cam_fd_hw_req_private  hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_start_args : Start command args
+ *
+ * @hw_ctx                : HW context which is submitting the start command
+ * @ctx_hw_private        : HW layer's private information specific to this
+ *                          hw context
+ * @hw_req_private        : HW layer's private information specific to this
+ *                          request
+ * @hw_update_entries     : HW update entries corresponding to this request
+ * @num_hw_update_entries : Number of hw update entries
+ */
+struct cam_fd_hw_cmd_start_args {
+	void                          *hw_ctx;
+	void                          *ctx_hw_private;
+	struct cam_fd_hw_req_private  *hw_req_private;
+	struct cam_hw_update_entry    *hw_update_entries;
+	uint32_t                       num_hw_update_entries;
+};
+
+/**
+ * struct cam_fd_hw_stop_args : Stop command args
+ *
+ * @hw_ctx         : HW context which is submitting the stop command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id     : Request ID that needs to be stopped
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_stop_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_frame_done_args : Frame done command args
+ *
+ * @hw_ctx         : HW context which is submitting the frame done request
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ * @request_id     : Request ID corresponding to this frame done
+ * @hw_req_private : HW layer's private information specific to this request
+ */
+struct cam_fd_hw_frame_done_args {
+	void                         *hw_ctx;
+	void                         *ctx_hw_private;
+	uint64_t                      request_id;
+	struct cam_fd_hw_req_private *hw_req_private;
+};
+
+/**
+ * struct cam_fd_hw_reset_args : Reset command args
+ *
+ * @hw_ctx         : HW context which is submitting the reset command
+ * @ctx_hw_private : HW layer's private information specific to this hw context
+ */
+struct cam_fd_hw_reset_args {
+	void    *hw_ctx;
+	void    *ctx_hw_private;
+};
+
+/**
+ * struct cam_fd_hw_cmd_set_irq_cb : Set IRQ callback command args
+ *
+ * @cam_fd_hw_mgr_cb : HW Mgr's callback pointer
+ * @data             : HW Mgr's private data
+ */
+struct cam_fd_hw_cmd_set_irq_cb {
+	int (*cam_fd_hw_mgr_cb)(void *data, enum cam_fd_hw_irq_type irq_type);
+	void *data;
+};
+
+#endif /* _CAM_FD_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
new file mode 100644
index 0000000..78b9948
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_fd_hw_core.h"
+#include "cam_fd_hw_soc.h"
+
+static bool cam_fd_hw_util_cpas_callback(uint32_t handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	if (!irq_data)
+		return false;
+
+	CAM_DBG(CAM_FD, "CPAS hdl=%d, udata=%pK, irq_type=%d",
+		handle, userdata, irq_data->irq_type);
+
+	return false;
+}
+
+static int cam_fd_hw_soc_util_setup_regbase_indices(
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	uint32_t index;
+	int rc, i;
+
+	for (i = 0; i < CAM_FD_REG_MAX; i++)
+		soc_private->regbase_index[i] = -1;
+
+	if ((soc_info->num_mem_block > CAM_SOC_MAX_BLOCK) ||
+		(soc_info->num_mem_block != CAM_FD_REG_MAX)) {
+		CAM_ERR(CAM_FD, "Invalid num_mem_block=%d",
+			soc_info->num_mem_block);
+		return -EINVAL;
+	}
+
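+	/*
+	 * Map the DT reg-names "fd_core" and "fd_wrapper" to their
+	 * positions in reg_map so register I/O can index by
+	 * enum cam_fd_reg_base.
+	 */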
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "fd_core", &index);
+	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+		soc_private->regbase_index[CAM_FD_REG_CORE] = index;
+	} else {
+		CAM_ERR(CAM_FD, "regbase not found for FD_CORE, rc=%d, %d %d",
+			rc, index, CAM_FD_REG_MAX);
+		return -EINVAL;
+	}
+
+	rc = cam_common_util_get_string_index(soc_info->mem_block_name,
+		soc_info->num_mem_block, "fd_wrapper", &index);
+	if ((rc == 0) && (index < CAM_FD_REG_MAX)) {
+		soc_private->regbase_index[CAM_FD_REG_WRAPPER] = index;
+	} else {
+		CAM_ERR(CAM_FD, "regbase not found FD_WRAPPER, rc=%d, %d %d",
+			rc, index, CAM_FD_REG_MAX);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FD, "Reg indices : CORE=%d, WRAPPER=%d",
+		soc_private->regbase_index[CAM_FD_REG_CORE],
+		soc_private->regbase_index[CAM_FD_REG_WRAPPER]);
+
+	return 0;
+}
+
+static int cam_fd_soc_set_clk_flags(struct cam_hw_soc_info *soc_info)
+{
+	int i, rc = 0;
+
+	if (soc_info->num_clk > CAM_SOC_MAX_CLK) {
+		CAM_ERR(CAM_FD, "Invalid num clk %d", soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	/* set memcore and mem periphery logic flags to 0 */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		if ((strcmp(soc_info->clk_name[i], "fd_core_clk") == 0) ||
+			(strcmp(soc_info->clk_name[i], "fd_core_uar_clk") ==
+			0)) {
+			rc = cam_soc_util_set_clk_flags(soc_info, i,
+				CLKFLAG_NORETAIN_MEM);
+			if (rc)
+				CAM_ERR(CAM_FD,
+					"Failed in NORETAIN_MEM i=%d, rc=%d",
+					i, rc);
+
+			rc = cam_soc_util_set_clk_flags(soc_info, i,
+				CLKFLAG_NORETAIN_PERIPH);
+			if (rc)
+				CAM_ERR(CAM_FD,
+					"Failed in NORETAIN_PERIPH i=%d, rc=%d",
+					i, rc);
+		}
+	}
+
+	return rc;
+}
+
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int32_t reg_index = soc_private->regbase_index[reg_base];
+
+	CAM_DBG(CAM_FD, "FD_REG_WRITE: Base[%d] Offset[0x%8x] Value[0x%8x]",
+		reg_base, reg_offset, reg_value);
+
+	cam_io_w_mb(reg_value,
+		soc_info->reg_map[reg_index].mem_base + reg_offset);
+}
+
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int32_t reg_index = soc_private->regbase_index[reg_base];
+	uint32_t reg_value;
+
+	reg_value = cam_io_r_mb(
+		soc_info->reg_map[reg_index].mem_base + reg_offset);
+
+	CAM_DBG(CAM_FD, "FD_REG_READ: Base[%d] Offset[0x%8x] Value[0x%8x]",
+		reg_base, reg_offset, reg_value);
+
+	return reg_value;
+}
+
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private = soc_info->soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc;
+
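+	/*
+	 * Vote for AHB/AXI bandwidth through CPAS before turning on the
+	 * platform resources (clocks, regulators, IRQ).
+	 */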
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = 7200000;
+	axi_vote.uncompressed_bw = 7200000;
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in CPAS START, rc=%d", rc);
+		return -EFAULT;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+		true);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error enable platform failed, rc=%d", rc);
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	if (cam_cpas_stop(soc_private->cpas_handle))
+		CAM_ERR(CAM_FD, "Error in CPAS STOP");
+
+	return rc;
+}
+
+
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private;
+	int rc = 0;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_FD, "Invalid soc_info param");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_FD, "disable platform resources failed, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Error in CPAS STOP, handle=0x%x, rc=%d",
+			soc_private->cpas_handle, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data)
+{
+	struct cam_fd_soc_private *soc_private;
+	struct cam_cpas_register_params cpas_register_param;
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		private_data);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in request_platform_resource rc=%d",
+			rc);
+		return rc;
+	}
+
+	rc = cam_fd_soc_set_clk_flags(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "failed in set_clk_flags rc=%d", rc);
+		goto release_res;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_fd_soc_private), GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+	soc_info->soc_private = soc_private;
+
+	rc = cam_fd_hw_soc_util_setup_regbase_indices(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FD, "Failed in setup regbase, rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "fd", CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = &soc_info->pdev->dev;
+	cpas_register_param.userdata = private_data;
+	cpas_register_param.cam_cpas_client_cb = cam_fd_hw_util_cpas_callback;
+
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_FD, "CPAS registration failed");
+		goto free_soc_private;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+	CAM_DBG(CAM_FD, "CPAS handle=%d", soc_private->cpas_handle);
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_fd_soc_private *soc_private =
+		(struct cam_fd_soc_private *)soc_info->soc_private;
+	int rc;
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_FD, "Unregister cpas failed, handle=%d, rc=%d",
+			soc_private->cpas_handle, rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_FD, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h
new file mode 100644
index 0000000..cc75aa7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_soc.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_SOC_H_
+#define _CAM_FD_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+/**
+ * enum cam_fd_reg_base - Enum for FD register sets
+ *
+ * @CAM_FD_REG_CORE    : Indicates FD Core register space
+ * @CAM_FD_REG_WRAPPER : Indicates FD Wrapper register space
+ * @CAM_FD_REG_MAX     : Max number of register sets supported
+ */
+enum cam_fd_reg_base {
+	CAM_FD_REG_CORE,
+	CAM_FD_REG_WRAPPER,
+	CAM_FD_REG_MAX
+};
+
+/**
+ * struct cam_fd_soc_private : FD private SOC information
+ *
+ * @regbase_index : Mapping from register base enum to register index in SOC
+ * @cpas_handle   : CPAS handle
+ *
+ */
+struct cam_fd_soc_private {
+	int32_t        regbase_index[CAM_FD_REG_MAX];
+	uint32_t       cpas_handle;
+};
+
+int cam_fd_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data);
+int cam_fd_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+int cam_fd_soc_enable_resources(struct cam_hw_soc_info *soc_info);
+int cam_fd_soc_disable_resources(struct cam_hw_soc_info *soc_info);
+uint32_t cam_fd_soc_register_read(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset);
+void cam_fd_soc_register_write(struct cam_hw_soc_info *soc_info,
+	enum cam_fd_reg_base reg_base, uint32_t reg_offset, uint32_t reg_value);
+
+#endif /* _CAM_FD_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h
new file mode 100644
index 0000000..9b283f2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v41.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_V41_H_
+#define _CAM_FD_HW_V41_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper120_core410_info = {
+	.core_version = {
+		.major  = 4,
+		.minor  = 1,
+		.incr   = 0,
+	},
+	.wrapper_version = {
+		.major  = 1,
+		.minor  = 2,
+		.incr   = 0,
+	},
+	.core_regs = {
+		.version               = 0x38,
+		.control               = 0x0,
+		.result_cnt            = 0x4,
+		.result_addr           = 0x20,
+		.image_addr            = 0x24,
+		.work_addr             = 0x28,
+		.ro_mode               = 0x34,
+		.results_reg_base      = 0x400,
+		.raw_results_reg_base  = 0x800,
+	},
+	.wrapper_regs = {
+		.wrapper_version       = 0x0,
+		.cgc_disable           = 0x4,
+		.hw_stop               = 0x8,
+		.sw_reset              = 0x10,
+		.vbif_req_priority     = 0x20,
+		.vbif_priority_level   = 0x24,
+		.vbif_done_status      = 0x34,
+		.irq_mask              = 0x50,
+		.irq_status            = 0x54,
+		.irq_clear             = 0x58,
+	},
+	.results = {
+		.max_faces             = 35,
+		.per_face_entries      = 4,
+		.raw_results_available = true,
+		.raw_results_entries   = 512,
+	},
+	.enable_errata_wa = {
+		.single_irq_only         = true,
+		.ro_mode_enable_always   = true,
+		.ro_mode_results_invalid = true,
+	},
+	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+	.qos_priority       = 4,
+	.qos_priority_level = 4,
+	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+	.ro_mode_supported  = true,
+};
+
+#endif /* _CAM_FD_HW_V41_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h
new file mode 100644
index 0000000..6b3f2c3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_fd/fd_hw_mgr/fd_hw/cam_fd_hw_v501.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FD_HW_V501_H_
+#define _CAM_FD_HW_V501_H_
+
+static struct cam_fd_hw_static_info cam_fd_wrapper200_core501_info = {
+	.core_version = {
+		.major  = 5,
+		.minor  = 0,
+		.incr   = 1,
+	},
+	.wrapper_version = {
+		.major  = 2,
+		.minor  = 0,
+		.incr   = 0,
+	},
+	.core_regs = {
+		.version               = 0x38,
+		.control               = 0x0,
+		.result_cnt            = 0x4,
+		.result_addr           = 0x20,
+		.image_addr            = 0x24,
+		.work_addr             = 0x28,
+		.ro_mode               = 0x34,
+		.results_reg_base      = 0x400,
+		.raw_results_reg_base  = 0x800,
+	},
+	.wrapper_regs = {
+		.wrapper_version       = 0x0,
+		.cgc_disable           = 0x4,
+		.hw_stop               = 0x8,
+		.sw_reset              = 0x10,
+		.vbif_req_priority     = 0x20,
+		.vbif_priority_level   = 0x24,
+		.vbif_done_status      = 0x34,
+		.irq_mask              = 0x50,
+		.irq_status            = 0x54,
+		.irq_clear             = 0x58,
+	},
+	.results = {
+		.max_faces             = 35,
+		.per_face_entries      = 4,
+		.raw_results_available = true,
+		.raw_results_entries   = 512,
+	},
+	.enable_errata_wa = {
+		.single_irq_only         = true,
+		.ro_mode_enable_always   = true,
+		.ro_mode_results_invalid = true,
+	},
+	.irq_mask = CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_FRAME_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_HALT_DONE) |
+		CAM_FD_IRQ_TO_MASK(CAM_FD_IRQ_RESET_DONE),
+	.qos_priority       = 4,
+	.qos_priority_level = 4,
+	.supported_modes    = CAM_FD_MODE_FACEDETECTION | CAM_FD_MODE_PYRAMID,
+	.ro_mode_supported  = true,
+};
+
+#endif /* _CAM_FD_HW_V501_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/Makefile b/drivers/media/platform/msm/camera/cam_icp/Makefile
new file mode 100644
index 0000000..e277f04
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_subdev.o cam_icp_context.o hfi.o
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
new file mode 100644
index 0000000..e799491
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <media/cam_sync.h>
+#include <media/cam_defs.h>
+#include "cam_sync_api.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_icp_context.h"
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+
+static const char icp_dev_name[] = "icp";
+
+static int cam_icp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	int rc = 0;
+	bool b_mem_found = false;
+
+	if (!ctx) {
+		CAM_ERR(CAM_ICP, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_ICP, "iommu fault for icp ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_ICP, "req_id : %lld", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &b_mem_found);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_ICP, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+	return rc;
+}
+
+static int __cam_icp_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (!rc) {
+		ctx->state = CAM_CTX_ACQUIRED;
+		trace_cam_context_state("ICP", ctx);
+	}
+
+	return rc;
+}
+
+static int __cam_icp_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Unable to release device");
+
+	ctx->state = CAM_CTX_AVAILABLE;
+	trace_cam_context_state("ICP", ctx);
+	return rc;
+}
+
+static int __cam_icp_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (!rc) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ICP", ctx);
+	}
+
+	return rc;
+}
+
+static int __cam_icp_flush_dev_in_ready(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to flush device");
+
+	return rc;
+}
+
+static int __cam_icp_config_dev_in_ready(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to prepare device");
+
+	return rc;
+}
+
+static int __cam_icp_stop_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to stop device");
+
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("ICP", ctx);
+	return rc;
+}
+
+static int __cam_icp_release_dev_in_ready(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = __cam_icp_stop_dev_in_ready(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to stop device");
+
+	rc = __cam_icp_release_dev_in_acquired(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to release device");
+
+	return rc;
+}
+
+static int __cam_icp_handle_buf_done_in_ready(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
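+/*
+ * Per-state ops for the ICP context; only the ioctl/irq ops that are
+ * valid in a given state are populated.
+ */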
+static struct cam_ctx_ops
+	cam_icp_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_icp_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_icp_release_dev_in_acquired,
+			.start_dev = __cam_icp_start_dev_in_acquired,
+			.config_dev = __cam_icp_config_dev_in_ready,
+			.flush_dev = __cam_icp_flush_dev_in_ready,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+		.pagefault_ops = cam_icp_context_dump_active_request,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_icp_stop_dev_in_ready,
+			.release_dev = __cam_icp_release_dev_in_ready,
+			.config_dev = __cam_icp_config_dev_in_ready,
+			.flush_dev = __cam_icp_flush_dev_in_ready,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_icp_handle_buf_done_in_ready,
+		.pagefault_ops = cam_icp_context_dump_active_request,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+		.pagefault_ops = cam_icp_context_dump_active_request,
+	},
+};
+
+int cam_icp_context_init(struct cam_icp_context *ctx,
+	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id)
+{
+	int rc;
+
+	if ((!ctx) || (!ctx->base) || (!hw_intf)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", ctx, hw_intf);
+		rc = -EINVAL;
+		goto err;
+	}
+
+	rc = cam_context_init(ctx->base, icp_dev_name, CAM_ICP, ctx_id,
+		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx->base->state_machine = cam_icp_ctx_state_machine;
+	ctx->base->ctx_priv = ctx;
+	ctx->ctxt_to_hw_map = NULL;
+
+err:
+	return rc;
+}
+
+int cam_icp_context_deinit(struct cam_icp_context *ctx)
+{
+	if ((!ctx) || (!ctx->base)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
new file mode 100644
index 0000000..e5ae61f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_context.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ICP_CONTEXT_H_
+#define _CAM_ICP_CONTEXT_H_
+
+#include "cam_context.h"
+
+/**
+ * struct cam_icp_context - icp context
+ * @base: icp context object
+ * @state_machine: state machine for ICP context
+ * @req_base: common request structure
+ * @state: icp context state
+ * @ctxt_to_hw_map: context to FW handle mapping
+ */
+struct cam_icp_context {
+	struct cam_context *base;
+	struct cam_ctx_ops *state_machine;
+	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+	uint32_t state;
+	void *ctxt_to_hw_map;
+};
+
+/**
+ * cam_icp_context_init() - ICP context init
+ * @ctx: Pointer to context
+ * @hw_intf: Pointer to ICP hardware interface
+ * @ctx_id: ID for this context
+ */
+int cam_icp_context_init(struct cam_icp_context *ctx,
+	struct cam_hw_mgr_intf *hw_intf, uint32_t ctx_id);
+
+/**
+ * cam_icp_context_deinit() - ICP context deinit
+ * @ctx: Pointer to context
+ */
+int cam_icp_context_deinit(struct cam_icp_context *ctx);
+
+#endif /* _CAM_ICP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
new file mode 100644
index 0000000..6d232c32
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/cam_icp_subdev.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_req_mgr_dev.h"
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_context.h"
+#include "cam_icp_context.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+#define CAM_ICP_DEV_NAME        "cam-icp"
+
+struct cam_icp_subdev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_ICP_CTX_MAX];
+	struct cam_icp_context ctx_icp[CAM_ICP_CTX_MAX];
+	struct mutex icp_lock;
+	int32_t open_cnt;
+	int32_t reserved;
+};
+
+static struct cam_icp_subdev g_icp_dev;
+
+static const struct of_device_id cam_icp_dt_match[] = {
+	{.compatible = "qcom,cam-icp"},
+	{}
+};
+
+static void cam_icp_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_ICP, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
+static int cam_icp_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	if (g_icp_dev.open_cnt >= 1) {
+		CAM_ERR(CAM_ICP, "ICP subdev is already opened");
+		rc = -EALREADY;
+		goto end;
+	}
+
+	if (!node) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	rc = hw_mgr_intf->hw_open(hw_mgr_intf->hw_mgr_priv, NULL);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "FW download failed");
+		goto end;
+	}
+	g_icp_dev.open_cnt++;
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	return rc;
+}
+
+static int cam_icp_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_hw_mgr_intf *hw_mgr_intf = NULL;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_icp_dev.icp_lock);
+	if (g_icp_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_ICP, "ICP subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+	if (!node) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+	g_icp_dev.open_cnt--;
+
+	hw_mgr_intf = &node->hw_mgr_intf;
+	if (!hw_mgr_intf) {
+		CAM_ERR(CAM_ICP, "hw_mgr_intf is not initialized");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_node_shutdown(node);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "HW close failed");
+		goto end;
+	}
+
+end:
+	mutex_unlock(&g_icp_dev.icp_lock);
+	return rc;
+}
+
+const struct v4l2_subdev_internal_ops cam_icp_subdev_internal_ops = {
+	.open = cam_icp_subdev_open,
+	.close = cam_icp_subdev_close,
+};
+
+static int cam_icp_probe(struct platform_device *pdev)
+{
+	int rc = 0, i = 0;
+	struct cam_node *node;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	int iommu_hdl = -1;
+
+	if (!pdev) {
+		CAM_ERR(CAM_ICP, "pdev is NULL");
+		return -EINVAL;
+	}
+
+	g_icp_dev.sd.pdev = pdev;
+	g_icp_dev.sd.internal_ops = &cam_icp_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_icp_dev.sd, pdev, CAM_ICP_DEV_NAME,
+		CAM_ICP_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP cam_subdev_probe failed");
+		goto probe_fail;
+	}
+
+	node = (struct cam_node *) g_icp_dev.sd.token;
+	g_icp_dev.node = node;
+
+	hw_mgr_intf = kzalloc(sizeof(*hw_mgr_intf), GFP_KERNEL);
+	if (!hw_mgr_intf) {
+		rc = -ENOMEM;
+		goto hw_alloc_fail;
+	}
+
+	rc = cam_icp_hw_mgr_init(pdev->dev.of_node, (uint64_t *)hw_mgr_intf,
+		&iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP HW manager init failed: %d", rc);
+		goto hw_init_fail;
+	}
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		g_icp_dev.ctx_icp[i].base = &g_icp_dev.ctx[i];
+		rc = cam_icp_context_init(&g_icp_dev.ctx_icp[i],
+					hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "ICP context init failed");
+			goto ctx_fail;
+		}
+	}
+
+	rc = cam_node_init(node, hw_mgr_intf, g_icp_dev.ctx,
+				CAM_ICP_CTX_MAX, CAM_ICP_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ICP node init failed");
+		goto ctx_fail;
+	}
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_icp_dev_iommu_fault_handler, node);
+
+	g_icp_dev.open_cnt = 0;
+	mutex_init(&g_icp_dev.icp_lock);
+
+	CAM_DBG(CAM_ICP, "ICP probe complete");
+
+	return rc;
+
+ctx_fail:
+	for (--i; i >= 0; i--)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+hw_init_fail:
+	kfree(hw_mgr_intf);
+hw_alloc_fail:
+	cam_subdev_remove(&g_icp_dev.sd);
+probe_fail:
+	return rc;
+}
+
+static int cam_icp_remove(struct platform_device *pdev)
+{
+	int i;
+	struct v4l2_subdev *sd;
+	struct cam_subdev *subdev;
+
+	if (!pdev) {
+		CAM_ERR(CAM_ICP, "pdev is NULL");
+		return -ENODEV;
+	}
+
+	sd = platform_get_drvdata(pdev);
+	if (!sd) {
+		CAM_ERR(CAM_ICP, "V4l2 subdev is NULL");
+		return -ENODEV;
+	}
+
+	subdev = v4l2_get_subdevdata(sd);
+	if (!subdev) {
+		CAM_ERR(CAM_ICP, "cam subdev is NULL");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		cam_icp_context_deinit(&g_icp_dev.ctx_icp[i]);
+	cam_node_deinit(g_icp_dev.node);
+	cam_subdev_remove(&g_icp_dev.sd);
+	mutex_destroy(&g_icp_dev.icp_lock);
+
+	return 0;
+}
+
+static struct platform_driver cam_icp_driver = {
+	.probe = cam_icp_probe,
+	.remove = cam_icp_remove,
+	.driver = {
+		.name = "cam_icp",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_icp_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_icp_init_module(void)
+{
+	return platform_driver_register(&cam_icp_driver);
+}
+
+static void __exit cam_icp_exit_module(void)
+{
+	platform_driver_unregister(&cam_icp_driver);
+}
+module_init(cam_icp_init_module);
+module_exit(cam_icp_exit_module);
+MODULE_DESCRIPTION("MSM ICP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
new file mode 100644
index 0000000..b116098
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_intf.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _HFI_INTF_H_
+#define _HFI_INTF_H_
+
+#include <linux/types.h>
+
+/**
+ * struct hfi_mem
+ * @len: length of memory
+ * @kva: kernel virtual address
+ * @iova: IO virtual address
+ * @reserved: reserved field
+ */
+struct hfi_mem {
+	uint64_t  len;
+	uintptr_t kva;
+	uint32_t  iova;
+	uint32_t  reserved;
+};
+
+/**
+ * struct hfi_mem_info
+ * @qtbl: qtable hfi memory
+ * @cmd_q: command queue hfi memory for host to firmware communication
+ * @msg_q: message queue hfi memory for firmware to host communication
+ * @dbg_q: debug queue hfi memory for firmware debug information
+ * @sfr_buf: buffer for subsystem failure reason (SFR)
+ * @sec_heap: secondary heap hfi memory for firmware
+ * @shmem: shared memory mapped for firmware
+ * @qdss: qdss mapped memory for fw
+ * @icp_base: icp base address
+ */
+struct hfi_mem_info {
+	struct hfi_mem qtbl;
+	struct hfi_mem cmd_q;
+	struct hfi_mem msg_q;
+	struct hfi_mem dbg_q;
+	struct hfi_mem sfr_buf;
+	struct hfi_mem sec_heap;
+	struct hfi_mem shmem;
+	struct hfi_mem qdss;
+	void __iomem *icp_base;
+};
+
+/**
+ * hfi_write_cmd() - function for hfi write
+ * @cmd_ptr: pointer to command data for hfi write
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_write_cmd(void *cmd_ptr);
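+
+/*
+ * Illustrative usage sketch (not part of this interface): hfi_write_cmd()
+ * reads the first u32 of the packet as its size in bytes, so any fully
+ * populated HFI packet can be queued directly, e.g. a system ping:
+ *
+ *	struct hfi_cmd_ping_pkt ping = {
+ *		.size = sizeof(ping),
+ *		.pkt_type = HFI_CMD_SYS_PING,
+ *		.user_data = 0,
+ *	};
+ *	rc = hfi_write_cmd(&ping);
+ */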
+
+/**
+ * hfi_read_message() - function for hfi read
+ * @pmsg: buffer to place read message for hfi queue
+ * @q_id: queue id
+ * @words_read: total number of words read from the queue
+ *              returned as output to the caller
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id, uint32_t *words_read);
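+
+/*
+ * Illustrative usage sketch: drain one packet from the firmware-to-host
+ * message queue into a word buffer (Q_MSG and the size bound are defined
+ * in hfi_reg.h):
+ *
+ *	uint32_t msg[ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS];
+ *	uint32_t words_read = 0;
+ *	int rc = hfi_read_message(msg, Q_MSG, &words_read);
+ */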
+
+/**
+ * cam_hfi_init() - initialize hfi after firmware download
+ * @event_driven_mode: event mode
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug);
+
+/**
+ * hfi_get_hw_caps() - hardware capabilities from firmware
+ * @query_caps: holds query information from hfi
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int hfi_get_hw_caps(void *query_caps);
+
+/**
+ * hfi_send_system_cmd() - send hfi system command to firmware
+ * @type: type of system command
+ * @data: command data
+ * @size: size of command data
+ */
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size);
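+
+/*
+ * Illustrative usage sketch (assumed calling pattern, shown only for
+ * clarity): a parameter-less system command such as power collapse
+ * preparation carries no payload:
+ *
+ *	hfi_send_system_cmd(HFI_CMD_SYS_PC_PREP, 0, 0);
+ */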
+
+/**
+ * cam_hfi_enable_cpu() - enable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_enable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_disable_cpu() - disable A5 CPU
+ * @icp_base: icp base address
+ */
+void cam_hfi_disable_cpu(void __iomem *icp_base);
+
+/**
+ * cam_hfi_deinit() - cleanup HFI
+ * @icp_base: icp base address
+ */
+void cam_hfi_deinit(void __iomem *icp_base);
+
+/**
+ * hfi_set_debug_level() - set debug level
+ * @a5_dbg_type: 1 for debug_q & 2 for qdss
+ * @lvl: FW debug message level
+ */
+int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl);
+
+/**
+ * hfi_set_fw_dump_level() - set firmware dump level
+ * @lvl: level of firmware dump level
+ */
+int hfi_set_fw_dump_level(uint32_t lvl);
+
+/**
+ * hfi_enable_ipe_bps_pc() - Enable interframe pc
+ * Host sends a command to firmware to enable interframe
+ * power collapse for IPE and BPS hardware.
+ *
+ * @enable: flag to enable/disable
+ * @core_info: Core information to firmware
+ */
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info);
+
+/**
+ * hfi_cmd_ubwc_config() - UBWC configuration to firmware
+ * @ubwc_cfg: UBWC configuration parameters
+ */
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg);
+
+/**
+ * cam_hfi_resume() - function to resume
+ * @hfi_mem: hfi memory info
+ * @icp_base: icp base address
+ * @debug: debug flag
+ *
+ * Returns success(zero)/failure(non zero)
+ */
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug);
+
+/**
+ * cam_hfi_queue_dump() - utility function to dump hfi queues
+ */
+void cam_hfi_queue_dump(void);
+
+#endif /* _HFI_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
new file mode 100644
index 0000000..f389eae
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_reg.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HFI_REG_H_
+#define _CAM_HFI_REG_H_
+
+#include <linux/types.h>
+#include "hfi_intf.h"
+
+
+/* start of ICP CSR registers */
+#define HFI_REG_A5_HW_VERSION                   0x0
+#define HFI_REG_A5_CSR_NSEC_RESET               0x4
+#define HFI_REG_A5_CSR_A5_CONTROL               0x8
+#define HFI_REG_A5_CSR_ETM                      0xC
+#define HFI_REG_A5_CSR_A2HOSTINTEN              0x10
+#define HFI_REG_A5_CSR_A2HOSTINT                0x14
+#define HFI_REG_A5_CSR_A2HOSTINTCLR             0x18
+#define HFI_REG_A5_CSR_A2HOSTINTSTATUS          0x1C
+#define HFI_REG_A5_CSR_A2HOSTINTSET             0x20
+#define HFI_REG_A5_CSR_HOST2ICPINT              0x30
+#define HFI_REG_A5_CSR_A5_STATUS                0x200
+#define HFI_REG_A5_QGIC2_LM_ID                  0x204
+#define HFI_REG_A5_SPARE                        0x400
+
+/* general purpose registers */
+#define HFI_REG_FW_VERSION                      0x44
+#define HFI_REG_HOST_ICP_INIT_REQUEST           0x48
+#define HFI_REG_ICP_HOST_INIT_RESPONSE          0x4C
+#define HFI_REG_SHARED_MEM_PTR                  0x50
+#define HFI_REG_SHARED_MEM_SIZE                 0x54
+#define HFI_REG_QTBL_PTR                        0x58
+#define HFI_REG_UNCACHED_HEAP_PTR               0x5C
+#define HFI_REG_UNCACHED_HEAP_SIZE              0x60
+#define HFI_REG_QDSS_IOVA                       0x6C
+#define HFI_REG_QDSS_IOVA_SIZE                  0x70
+#define HFI_REG_SFR_PTR                         0x68
+/* end of ICP CSR registers */
+
+/* flags for ICP CSR registers */
+#define ICP_FLAG_CSR_WAKE_UP_EN                 (1 << 4)
+#define ICP_FLAG_CSR_A5_EN                      (1 << 9)
+#define ICP_CSR_EN_CLKGATE_WFI                  (1 << 12)
+#define ICP_CSR_EDBGRQ                          (1 << 14)
+#define ICP_CSR_DBGSWENABLE                     (1 << 22)
+#define ICP_CSR_A5_STATUS_WFI                   (1 << 7)
+
+#define ICP_FLAG_A5_CTRL_DBG_EN                 (ICP_FLAG_CSR_WAKE_UP_EN|\
+						ICP_FLAG_CSR_A5_EN|\
+						ICP_CSR_EDBGRQ|\
+						ICP_CSR_DBGSWENABLE)
+
+#define ICP_FLAG_A5_CTRL_EN                     (ICP_FLAG_CSR_WAKE_UP_EN|\
+						ICP_FLAG_CSR_A5_EN|\
+						ICP_CSR_EN_CLKGATE_WFI)
+
+/* start of Queue table and queues */
+#define MAX_ICP_HFI_QUEUES                      4
+#define ICP_QHDR_TX_TYPE_MASK                   0xFF000000
+#define ICP_QHDR_RX_TYPE_MASK                   0x00FF0000
+#define ICP_QHDR_PRI_TYPE_MASK                  0x0000FF00
+#define ICP_QHDR_Q_ID_MASK                      0x000000FF
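+
+/*
+ * Illustrative decode sketch (local variables are hypothetical): the masks
+ * above carve the packed qhdr_type word into byte-wide fields:
+ *
+ *	tx_type = (qhdr_type & ICP_QHDR_TX_TYPE_MASK) >> 24;
+ *	rx_type = (qhdr_type & ICP_QHDR_RX_TYPE_MASK) >> 16;
+ *	pri     = (qhdr_type & ICP_QHDR_PRI_TYPE_MASK) >> 8;
+ *	q_id    = qhdr_type & ICP_QHDR_Q_ID_MASK;
+ */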
+
+#define ICP_CMD_Q_SIZE_IN_BYTES                 4096
+#define ICP_MSG_Q_SIZE_IN_BYTES                 4096
+#define ICP_DBG_Q_SIZE_IN_BYTES                 102400
+#define ICP_MSG_SFR_SIZE_IN_BYTES               4096
+
+#define ICP_SHARED_MEM_IN_BYTES                 (1024 * 1024)
+#define ICP_UNCACHED_HEAP_SIZE_IN_BYTES         (2 * 1024 * 1024)
+#define ICP_HFI_MAX_PKT_SIZE_IN_WORDS           25600
+#define ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS      256
+
+#define ICP_HFI_QTBL_HOSTID1                    0x01000000
+#define ICP_HFI_QTBL_STATUS_ENABLED             0x00000001
+#define ICP_HFI_NUMBER_OF_QS                    3
+#define ICP_HFI_NUMBER_OF_ACTIVE_QS             3
+#define ICP_HFI_QTBL_OFFSET                     0
+#define ICP_HFI_VAR_SIZE_PKT                    0
+#define ICP_HFI_MAX_MSG_SIZE_IN_WORDS           128
+
+
+/* Queue Header type masks. Use these to access bitfields in qhdr_type */
+#define HFI_MASK_QHDR_TX_TYPE                   0xFF000000
+#define HFI_MASK_QHDR_RX_TYPE                   0x00FF0000
+#define HFI_MASK_QHDR_PRI_TYPE                  0x0000FF00
+#define HFI_MASK_QHDR_Q_ID_TYPE                 0x000000FF
+
+
+#define TX_EVENT_DRIVEN_MODE_1                  0
+#define RX_EVENT_DRIVEN_MODE_1                  0
+#define TX_EVENT_DRIVEN_MODE_2                  0x01000000
+#define RX_EVENT_DRIVEN_MODE_2                  0x00010000
+#define TX_EVENT_POLL_MODE_2                    0x02000000
+#define RX_EVENT_POLL_MODE_2                    0x00020000
+#define U32_OFFSET                              0x1
+#define BYTE_WORD_SHIFT                         2
+
+/**
+ * @HFI_DEINIT: HFI is not initialized yet
+ * @HFI_INIT: HFI is initialized
+ * @HFI_READY: HFI is ready to send/receive commands/messages
+ */
+enum hfi_state {
+	HFI_DEINIT,
+	HFI_INIT,
+	HFI_READY
+};
+
+/**
+ * @RESET: register reset value
+ * @SET: register set value
+ * @SET_WM: watermark setting
+ */
+enum reg_settings {
+	RESET,
+	SET,
+	SET_WM = 1024
+};
+
+/**
+ * @INTR_DISABLE: Disable interrupt
+ * @INTR_ENABLE: Enable interrupt
+ * @INTR_ENABLE_WD0: Enable WD0
+ * @INTR_ENABLE_WD1: Enable WD1
+ */
+enum intr_status {
+	INTR_DISABLE,
+	INTR_ENABLE,
+	INTR_ENABLE_WD0,
+	INTR_ENABLE_WD1 = 0x4
+};
+
+/**
+ * @ICP_INIT_RESP_RESET: reset init state
+ * @ICP_INIT_RESP_SUCCESS: init success
+ * @ICP_INIT_RESP_FAILED: init failed
+ */
+enum host_init_resp {
+	ICP_INIT_RESP_RESET,
+	ICP_INIT_RESP_SUCCESS,
+	ICP_INIT_RESP_FAILED
+};
+
+/**
+ * @ICP_INIT_REQUEST_RESET: reset init request
+ * @ICP_INIT_REQUEST_SET: set init request
+ */
+enum host_init_request {
+	ICP_INIT_REQUEST_RESET,
+	ICP_INIT_REQUEST_SET
+};
+
+/**
+ * @QHDR_INACTIVE: Queue is inactive
+ * @QHDR_ACTIVE: Queue is active
+ */
+enum qhdr_status {
+	QHDR_INACTIVE,
+	QHDR_ACTIVE
+};
+
+/**
+ * @INTR_MODE: event driven mode 1, each send and receive generates interrupt
+ * @WM_MODE: event driven mode 2, interrupts based on watermark mechanism
+ * @POLL_MODE: poll method
+ */
+enum qhdr_event_drv_type {
+	INTR_MODE,
+	WM_MODE,
+	POLL_MODE
+};
+
+/**
+ * @TX_INT: event driven mode 1, each send and receive generates interrupt
+ * @TX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @TX_POLL: poll method
+ * @ICP_QHDR_TX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_tx_type {
+	TX_INT,
+	TX_INT_WM,
+	TX_POLL
+};
+
+/**
+ * @RX_INT: event driven mode 1, each send and receive generates interrupt
+ * @RX_INT_WM: event driven mode 2, interrupts based on watermark mechanism
+ * @RX_POLL: poll method
+ * @ICP_QHDR_RX_TYPE_MASK defines position in qhdr_type
+ */
+enum qhdr_rx_type {
+	RX_INT,
+	RX_INT_WM,
+	RX_POLL
+};
+
+/**
+ * @Q_CMD: Host to FW command queue
+ * @Q_MSG: FW to Host message queue
+ * @Q_DEBUG: FW to Host debug queue
+ * @ICP_QHDR_Q_ID_MASK defines position in qhdr_type
+ */
+enum qhdr_q_id {
+	Q_CMD,
+	Q_MSG,
+	Q_DBG
+};
+
+/**
+ * struct hfi_qtbl_hdr
+ * @qtbl_version: Queue table version number
+ *                Higher 16 bits: Major version
+ *                Lower 16 bits: Minor version
+ * @qtbl_size: Queue table size from version to last parameter in qhdr entry
+ * @qtbl_qhdr0_offset: Offset to the start of first qhdr
+ * @qtbl_qhdr_size: Queue header size in bytes
+ * @qtbl_num_q: Total number of queues in Queue table
+ * @qtbl_num_active_q: Total number of active queues
+ */
+struct hfi_qtbl_hdr {
+	uint32_t qtbl_version;
+	uint32_t qtbl_size;
+	uint32_t qtbl_qhdr0_offset;
+	uint32_t qtbl_qhdr_size;
+	uint32_t qtbl_num_q;
+	uint32_t qtbl_num_active_q;
+} __packed;
+
+/**
+ * struct hfi_q_hdr
+ * @qhdr_status: Queue status, qhdr_state define possible status
+ * @qhdr_start_addr: Queue start address in non cached memory
+ * @qhdr_type: qhdr_tx, qhdr_rx, qhdr_q_id and priority defines qhdr type
+ * @qhdr_q_size: Queue size
+ *		Number of queue packets if qhdr_pkt_size is non-zero
+ *		Queue size in bytes if qhdr_pkt_size is zero
+ * @qhdr_pkt_size: Size of queue packet entries
+ *		0x0: variable queue packet size
+ *		non zero: size of queue packet entry, fixed
+ * @qhdr_pkt_drop_cnt: Number of packets dropped by sender
+ * @qhdr_rx_wm: Receiver watermark, applicable in event driven mode
+ * @qhdr_tx_wm: Sender watermark, applicable in event driven mode
+ * @qhdr_rx_req: Receiver sets this bit if queue is empty
+ * @qhdr_tx_req: Sender sets this bit if queue is full
+ * @qhdr_rx_irq_status: Receiver sets this bit and triggers an interrupt to
+ *		the sender after packets are dequeued. Sender clears this bit
+ * @qhdr_tx_irq_status: Sender sets this bit and triggers an interrupt to
+ *		the receiver after packets are queued. Receiver clears this bit
+ * @qhdr_read_idx: Read index
+ * @qhdr_write_idx: Write index
+ */
+struct hfi_q_hdr {
+	uint32_t dummy[15];
+	uint32_t qhdr_status;
+	uint32_t dummy1[15];
+	uint32_t qhdr_start_addr;
+	uint32_t dummy2[15];
+	uint32_t qhdr_type;
+	uint32_t dummy3[15];
+	uint32_t qhdr_q_size;
+	uint32_t dummy4[15];
+	uint32_t qhdr_pkt_size;
+	uint32_t dummy5[15];
+	uint32_t qhdr_pkt_drop_cnt;
+	uint32_t dummy6[15];
+	uint32_t qhdr_rx_wm;
+	uint32_t dummy7[15];
+	uint32_t qhdr_tx_wm;
+	uint32_t dummy8[15];
+	uint32_t qhdr_rx_req;
+	uint32_t dummy9[15];
+	uint32_t qhdr_tx_req;
+	uint32_t dummy10[15];
+	uint32_t qhdr_rx_irq_status;
+	uint32_t dummy11[15];
+	uint32_t qhdr_tx_irq_status;
+	uint32_t dummy12[15];
+	uint32_t qhdr_read_idx;
+	uint32_t dummy13[15];
+	uint32_t qhdr_write_idx;
+	uint32_t dummy14[15];
+};
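+
+/*
+ * The read and write indices implement a circular buffer over the queue
+ * memory. A producer-side free-space check therefore wraps around
+ * qhdr_q_size (sketch of the computation used by hfi_write_cmd()):
+ *
+ *	empty = (wr >= rd) ? (q_size - (wr - rd)) : (rd - wr);
+ */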
+
+/**
+ * struct sfr_buf
+ * @size: Number of characters
+ * @msg : Subsystem failure reason
+ */
+struct sfr_buf {
+	uint32_t size;
+	char msg[ICP_MSG_SFR_SIZE_IN_BYTES];
+};
+
+/**
+ * struct hfi_q_tbl
+ * @q_tbl_hdr: Queue table header
+ * @q_hdr: Queue header info, it holds info of cmd, msg and debug queues
+ */
+struct hfi_qtbl {
+	struct hfi_qtbl_hdr q_tbl_hdr;
+	struct hfi_q_hdr q_hdr[MAX_ICP_HFI_QUEUES];
+};
+
+/**
+ * struct hfi_info
+ * @map: Hfi shared memory info
+ * @smem_size: Shared memory size
+ * @uncachedheap_size: uncached heap size
+ * @msgpacket_buf: message buffer
+ * @hfi_state: State machine for hfi
+ * @cmd_q_lock: Lock for command queue
+ * @cmd_q_state: State of command queue
+ * @msg_q_lock: Lock for message queue
+ * @msg_q_state: State of message queue
+ * @csr_base: CSR base address
+ */
+struct hfi_info {
+	struct hfi_mem_info map;
+	uint32_t smem_size;
+	uint32_t uncachedheap_size;
+	uint32_t msgpacket_buf[ICP_HFI_MAX_MSG_SIZE_IN_WORDS];
+	uint8_t hfi_state;
+	struct mutex cmd_q_lock;
+	bool cmd_q_state;
+	struct mutex msg_q_lock;
+	bool msg_q_state;
+	void __iomem *csr_base;
+};
+
+#endif /* _CAM_HFI_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
new file mode 100644
index 0000000..7bb1eba
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_session_defs.h
@@ -0,0 +1,523 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_HFI_SESSION_DEFS_H
+#define _CAM_HFI_SESSION_DEFS_H
+
+#include <linux/types.h>
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO             0x1
+#define HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS         0x2
+#define HFI_IPEBPS_CMD_OPCODE_BPS_ABORT                 0x3
+#define HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY               0x4
+
+#define HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO             0x5
+#define HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS         0x6
+#define HFI_IPEBPS_CMD_OPCODE_IPE_ABORT                 0x7
+#define HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY               0x8
+
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_IPE          0x9
+#define HFI_IPEBPS_CMD_OPCODE_BPS_WAIT_FOR_BPS          0xa
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_BPS          0xb
+#define HFI_IPEBPS_CMD_OPCODE_IPE_WAIT_FOR_IPE          0xc
+
+#define HFI_IPEBPS_HANDLE_TYPE_BPS                      0x1
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_RT                   0x2
+#define HFI_IPEBPS_HANDLE_TYPE_IPE_NON_RT               0x3
+
+/**
+ * struct abort_data
+ * @num_req_ids: Number of req ids
+ * @num_req_id: variable length array of request ids
+ *
+ * create abort data
+ */
+struct abort_data {
+	uint32_t num_req_ids;
+	uint32_t num_req_id[1];
+};
+
+/**
+ * struct hfi_cmd_abort
+ * @abort: abort data
+ * @user_data: user supplied argument
+ *
+ * create session abort data
+ */
+struct hfi_cmd_abort {
+	struct abort_data abort;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_abort_destroy
+ * @user_data: user supplied data
+ *
+ * IPE/BPS destroy/abort command
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_ABORT
+ * @HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY
+ * @HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY
+ */
+struct hfi_cmd_abort_destroy {
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_chaining_ops
+ * @wait_hdl: current session handle waits on wait_hdl to complete operation
+ * @user_data: user supplied argument
+ *
+ * this structure is used for the chaining opcodes
+ * BPS_WAITS_FOR_IPE
+ * BPS_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_BPS
+ * IPE_WAITS_FOR_IPE
+ */
+struct hfi_cmd_chaining_ops {
+	uint32_t wait_hdl;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_create_handle
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @handle_type: IPE/BPS firmware session handle type
+ * @user_data1: caller provided data1
+ * @user_data2: caller provided data2
+ *
+ * create firmware session handle
+ */
+struct hfi_cmd_create_handle {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t handle_type;
+	uint64_t user_data1;
+	uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_cmd_ipebps_async
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode for IPE/BPS async operation
+ *          CONFIG_IO: configures I/O for IPE/BPS handle
+ *          FRAME_PROCESS: image frame to be processed by IPE/BPS
+ *          ABORT: abort all processing frames of IPE/BPS handle
+ *          DESTROY: destroy earlier created IPE/BPS handle
+ *          BPS_WAITS_FOR_IPE: sync for BPS to wait for IPE
+ *          BPS_WAITS_FOR_BPS: sync for BPS to wait for BPS
+ *          IPE_WAITS_FOR_IPE: sync for IPE to wait for IPE
+ *          IPE_WAITS_FOR_BPS: sync for IPE to wait for BPS
+ * @user_data1: caller provided data1
+ * @user_data2: caller provided data2
+ * @num_fw_handles: number of IPE/BPS firmware handles in fw_handles array
+ * @fw_handles: IPE/BPS handles array
+ * @payload: command payload for IPE/BPS opcodes
+ * @direct: points to actual payload
+ * @indirect: points to address of payload
+ *
+ * sends async command to the earlier created IPE or BPS handle
+ * for asynchronous operation.
+ */
+struct hfi_cmd_ipebps_async {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t opcode;
+	uint64_t user_data1;
+	uint64_t user_data2;
+	uint32_t num_fw_handles;
+	uint32_t fw_handles[1];
+	union {
+		uint32_t direct[1];
+		uint32_t indirect;
+	} payload;
+} __packed;
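+
+/*
+ * Illustrative fill-in sketch (fw_handle is a hypothetical variable): a
+ * direct async destroy command for an earlier created IPE handle would be
+ * built roughly as:
+ *
+ *	pkt->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+ *	pkt->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
+ *	pkt->num_fw_handles = 1;
+ *	pkt->fw_handles[0] = fw_handle;
+ */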
+
+/**
+ * struct hfi_msg_create_handle_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code
+ * @fw_handle: output param for IPE/BPS handle
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ *
+ * ack for create handle command of IPE/BPS
+ * @HFI_MSG_IPEBPS_CREATE_HANDLE_ACK
+ */
+struct hfi_msg_create_handle_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+	uint32_t fw_handle;
+	uint64_t user_data1;
+	uint64_t user_data2;
+} __packed;
+
+/**
+ * struct hfi_msg_ipebps_async_ack
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @opcode: opcode of IPE/BPS async operation
+ * @user_data1: user provided data1
+ * @user_data2: user provided data2
+ * @err_type: error code
+ * @msg_data: IPE/BPS async done message data
+ *
+ * result of IPE/BPS async command
+ * @HFI_MSG_IPEBPS_ASYNC_COMMAND_ACK
+ */
+struct hfi_msg_ipebps_async_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t opcode;
+	uint64_t user_data1;
+	uint64_t user_data2;
+	uint32_t err_type;
+	uint32_t msg_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_frame_process_done
+ * @result: result of frame process command
+ * @scratch_buffer_address: address of scratch buffer
+ */
+struct hfi_msg_frame_process_done {
+	uint32_t result;
+	uint32_t scratch_buffer_address;
+};
+
+/**
+ * struct hfi_msg_chaining_op
+ * @status: return status
+ * @user_data: user data provided as part of chaining ops
+ *
+ * IPE/BPS wait response
+ */
+struct hfi_msg_chaining_op {
+	uint32_t status;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_abort_destroy
+ * @status: return status
+ * @user_data: user data provided as part of abort/destroy ops
+ *
+ * IPE/BPS abort/destroy response
+ */
+struct hfi_msg_abort_destroy {
+	uint32_t status;
+	uint64_t user_data;
+} __packed;
+
+#define MAX_NUM_OF_IMAGE_PLANES	2
+#define MAX_HFR_GROUP          16
+
+enum hfi_ipe_io_images {
+	IPE_INPUT_IMAGE_FULL,
+	IPE_INPUT_IMAGE_DS4,
+	IPE_INPUT_IMAGE_DS16,
+	IPE_INPUT_IMAGE_DS64,
+	IPE_INPUT_IMAGE_FULL_REF,
+	IPE_INPUT_IMAGE_DS4_REF,
+	IPE_INPUT_IMAGE_DS16_REF,
+	IPE_INPUT_IMAGE_DS64_REF,
+	IPE_OUTPUT_IMAGE_DISPLAY,
+	IPE_OUTPUT_IMAGE_VIDEO,
+	IPE_OUTPUT_IMAGE_FULL_REF,
+	IPE_OUTPUT_IMAGE_DS4_REF,
+	IPE_OUTPUT_IMAGE_DS16_REF,
+	IPE_OUTPUT_IMAGE_DS64_REF,
+	IPE_INPUT_IMAGE_FIRST = IPE_INPUT_IMAGE_FULL,
+	IPE_INPUT_IMAGE_LAST = IPE_INPUT_IMAGE_DS64_REF,
+	IPE_OUTPUT_IMAGE_FIRST = IPE_OUTPUT_IMAGE_DISPLAY,
+	IPE_OUTPUT_IMAGE_LAST = IPE_OUTPUT_IMAGE_DS64_REF,
+	IPE_IO_IMAGES_MAX
+};
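+
+/*
+ * The _FIRST/_LAST aliases allow range iteration over inputs or outputs,
+ * e.g. (illustrative loop only, configure_input() is hypothetical):
+ *
+ *	for (i = IPE_INPUT_IMAGE_FIRST; i <= IPE_INPUT_IMAGE_LAST; i++)
+ *		configure_input(i);
+ */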
+
+enum bps_io_images {
+	BPS_INPUT_IMAGE,
+	BPS_OUTPUT_IMAGE_FULL,
+	BPS_OUTPUT_IMAGE_DS4,
+	BPS_OUTPUT_IMAGE_DS16,
+	BPS_OUTPUT_IMAGE_DS64,
+	BPS_OUTPUT_IMAGE_STATS_BG,
+	BPS_OUTPUT_IMAGE_STATS_BHIST,
+	BPS_OUTPUT_IMAGE_REG1,
+	BPS_OUTPUT_IMAGE_REG2,
+	BPS_OUTPUT_IMAGE_FIRST = BPS_OUTPUT_IMAGE_FULL,
+	BPS_OUTPUT_IMAGE_LAST = BPS_OUTPUT_IMAGE_REG2,
+	BPS_IO_IMAGES_MAX
+};
+
+struct frame_buffer {
+	uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+	uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct bps_frame_process_data {
+	struct frame_buffer buffers[BPS_IO_IMAGES_MAX];
+	uint32_t max_num_cores;
+	uint32_t target_time;
+	uint32_t ubwc_stats_buffer_addr;
+	uint32_t ubwc_stats_buffer_size;
+	uint32_t cdm_buffer_addr;
+	uint32_t cdm_buffer_size;
+	uint32_t iq_settings_addr;
+	uint32_t strip_lib_out_addr;
+	uint32_t cdm_prog_addr;
+	uint32_t request_id;
+};
+
+enum hfi_ipe_image_format {
+	IMAGE_FORMAT_INVALID,
+	IMAGE_FORMAT_MIPI_8,
+	IMAGE_FORMAT_MIPI_10,
+	IMAGE_FORMAT_MIPI_12,
+	IMAGE_FORMAT_MIPI_14,
+	IMAGE_FORMAT_BAYER_8,
+	IMAGE_FORMAT_BAYER_10,
+	IMAGE_FORMAT_BAYER_12,
+	IMAGE_FORMAT_BAYER_14,
+	IMAGE_FORMAT_PDI_10,
+	IMAGE_FORMAT_PD_10,
+	IMAGE_FORMAT_PD_8,
+	IMAGE_FORMAT_INDICATIONS,
+	IMAGE_FORMAT_REFINEMENT,
+	IMAGE_FORMAT_UBWC_TP_10,
+	IMAGE_FORMAT_UBWC_NV_12,
+	IMAGE_FORMAT_UBWC_NV12_4R,
+	IMAGE_FORMAT_UBWC_P010,
+	IMAGE_FORMAT_LINEAR_TP_10,
+	IMAGE_FORMAT_LINEAR_P010,
+	IMAGE_FORMAT_LINEAR_NV12,
+	IMAGE_FORMAT_LINEAR_PLAIN_16,
+	IMAGE_FORMAT_YUV422_8,
+	IMAGE_FORMAT_YUV422_10,
+	IMAGE_FORMAT_STATISTICS_BAYER_GRID,
+	IMAGE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+	IMAGE_FORMAT_MAX
+};
+
+enum hfi_ipe_plane_format {
+	PLANE_FORMAT_INVALID = 0,
+	PLANE_FORMAT_MIPI_8,
+	PLANE_FORMAT_MIPI_10,
+	PLANE_FORMAT_MIPI_12,
+	PLANE_FORMAT_MIPI_14,
+	PLANE_FORMAT_BAYER_8,
+	PLANE_FORMAT_BAYER_10,
+	PLANE_FORMAT_BAYER_12,
+	PLANE_FORMAT_BAYER_14,
+	PLANE_FORMAT_PDI_10,
+	PLANE_FORMAT_PD_10,
+	PLANE_FORMAT_PD_8,
+	PLANE_FORMAT_INDICATIONS,
+	PLANE_FORMAT_REFINEMENT,
+	PLANE_FORMAT_UBWC_TP_10_Y,
+	PLANE_FORMAT_UBWC_TP_10_C,
+	PLANE_FORMAT_UBWC_NV_12_Y,
+	PLANE_FORMAT_UBWC_NV_12_C,
+	PLANE_FORMAT_UBWC_NV_12_4R_Y,
+	PLANE_FORMAT_UBWC_NV_12_4R_C,
+	PLANE_FORMAT_UBWC_P010_Y,
+	PLANE_FORMAT_UBWC_P010_C,
+	PLANE_FORMAT_LINEAR_TP_10_Y,
+	PLANE_FORMAT_LINEAR_TP_10_C,
+	PLANE_FORMAT_LINEAR_P010_Y,
+	PLANE_FORMAT_LINEAR_P010_C,
+	PLANE_FORMAT_LINEAR_NV12_Y,
+	PLANE_FORMAT_LINEAR_NV12_C,
+	PLANE_FORMAT_LINEAR_PLAIN_16_Y,
+	PLANE_FORMAT_LINEAR_PLAIN_16_C,
+	PLANE_FORMAT_YUV422_8,
+	PLANE_FORMAT_YUV422_10,
+	PLANE_FORMAT_STATISTICS_BAYER_GRID,
+	PLANE_FORMAT_STATISTICS_BAYER_HISTOGRAM,
+	PLANE_FORMAT_MAX
+};
+
+enum hfi_ipe_bayer_pixel_order {
+	FIRST_PIXEL_R,
+	FIRST_PIXEL_GR,
+	FIRST_PIXEL_B,
+	FIRST_PIXEL_GB,
+	FIRST_PIXEL_MAX
+};
+
+enum hfi_ipe_pixel_pack_alignment {
+	PIXEL_LSB_ALIGNED,
+	PIXEL_MSB_ALIGNED,
+};
+
+enum hfi_ipe_yuv_422_order {
+	PIXEL_ORDER_Y_U_Y_V,
+	PIXEL_ORDER_Y_V_Y_U,
+	PIXEL_ORDER_U_Y_V_Y,
+	PIXEL_ORDER_V_Y_U_Y,
+	PIXEL_ORDER_YUV422_MAX
+};
+
+enum ubwc_write_client {
+	IPE_WR_CLIENT_0 = 0,
+	IPE_WR_CLIENT_1,
+	IPE_WR_CLIENT_5,
+	IPE_WR_CLIENT_6,
+	IPE_WR_CLIENT_7,
+	IPE_WR_CLIENT_8,
+	IPE_WR_CLIENT_MAX
+};
+
+/**
+ * struct image_info
+ * @format: image format
+ * @img_width: image width
+ * @img_height: image height
+ * @bayer_order: pixel order
+ * @pix_align: alignment
+ * @yuv422_order: YUV order
+ * @byte_swap: byte swap
+ */
+struct image_info {
+	enum hfi_ipe_image_format format;
+	uint32_t img_width;
+	uint32_t img_height;
+	enum hfi_ipe_bayer_pixel_order bayer_order;
+	enum hfi_ipe_pixel_pack_alignment pix_align;
+	enum hfi_ipe_yuv_422_order yuv422_order;
+	uint32_t byte_swap;
+} __packed;
+
+/**
+ * struct buffer_layout
+ * @buf_stride: buffer stride
+ * @buf_height: buffer height
+ */
+struct buffer_layout {
+	uint32_t buf_stride;
+	uint32_t buf_height;
+} __packed;
+
+/**
+ * struct image_desc
+ * @info: image info
+ * @buf_layout: buffer layout
+ * @meta_buf_layout: meta buffer layout
+ */
+struct image_desc {
+	struct image_info info;
+	struct buffer_layout buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+	struct buffer_layout meta_buf_layout[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+struct ica_stab_coeff {
+	uint32_t coeffs[8];
+} __packed;
+
+struct ica_stab_params {
+	uint32_t mode;
+	struct ica_stab_coeff transforms[3];
+} __packed;
+
+struct frame_set {
+	struct frame_buffer buffers[IPE_IO_IMAGES_MAX];
+	struct ica_stab_params ica_params;
+	uint32_t cdm_ica1_addr;
+	uint32_t cdm_ica2_addr;
+} __packed;
+
+struct ipe_frame_process_data {
+	uint32_t strip_lib_out_addr;
+	uint32_t iq_settings_addr;
+	uint32_t scratch_buffer_addr;
+	uint32_t scratch_buffer_size;
+	uint32_t ubwc_stats_buffer_addr;
+	uint32_t ubwc_stats_buffer_size;
+	uint32_t cdm_buffer_addr;
+	uint32_t cdm_buffer_size;
+	uint32_t max_num_cores;
+	uint32_t target_time;
+	uint32_t cdm_prog_base;
+	uint32_t cdm_pre_ltm;
+	uint32_t cdm_post_ltm;
+	uint32_t cdm_anr_full_pass;
+	uint32_t cdm_anr_ds4;
+	uint32_t cdm_anr_ds16;
+	uint32_t cdm_anr_ds64;
+	uint32_t cdm_tf_full_pass;
+	uint32_t cdm_tf_ds4;
+	uint32_t cdm_tf_ds16;
+	uint32_t cdm_tf_ds64;
+	uint32_t request_id;
+	uint32_t frames_in_batch;
+	struct frame_set framesets[MAX_HFR_GROUP];
+} __packed;
+
+/**
+ * struct hfi_cmd_ipe_config
+ * @images: image descriptions
+ * @user_data: user supplied data
+ *
+ * payload for IPE async command
+ */
+struct hfi_cmd_ipe_config {
+	struct image_desc images[IPE_IO_IMAGES_MAX];
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct frame_buffers
+ * @buf_ptr: buffer pointers for all planes
+ * @meta_buf_ptr: meta buffer pointers for all planes
+ */
+struct frame_buffers {
+	uint32_t buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+	uint32_t meta_buf_ptr[MAX_NUM_OF_IMAGE_PLANES];
+} __packed;
+
+/**
+ * struct hfi_msg_ipe_config
+ * @rc: result of ipe config command
+ * @scratch_mem_size: scratch mem size for a config
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_config {
+	uint32_t rc;
+	uint32_t scratch_mem_size;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_bps_common
+ * @rc: result of bps command
+ * @user_data: user data
+ */
+struct hfi_msg_bps_common {
+	uint32_t rc;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct ipe_bps_destroy
+ * @userdata: user data
+ */
+struct ipe_bps_destroy {
+	uint64_t userdata;
+};
+
+/**
+ * struct hfi_msg_ipe_frame_process
+ * @status: result of ipe frame process command
+ * @scratch_buf_addr: address of scratch buffer
+ * @user_data: user data
+ */
+struct hfi_msg_ipe_frame_process {
+	uint32_t status;
+	uint32_t scratch_buf_addr;
+	uint64_t user_data;
+} __packed;
+
+#endif /* _CAM_HFI_SESSION_DEFS_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
new file mode 100644
index 0000000..582058d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/fw_inc/hfi_sys_defs.h
@@ -0,0 +1,528 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _HFI_DEFS_H_
+#define _HFI_DEFS_H_
+
+#include <linux/types.h>
+
+/*
+ * Following base acts as common starting points
+ * for all enumerations.
+ */
+#define HFI_COMMON_BASE                 0x0
+
+/* HFI Domain base offset for commands and messages */
+#define HFI_DOMAIN_SHFT                 (24)
+#define HFI_DOMAIN_BMSK                 (0x7 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_ICP             (0x0 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_IPE_BPS         (0x1 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_CDM             (0x2 << HFI_DOMAIN_SHFT)
+#define HFI_DOMAIN_BASE_DBG             (0x3 << HFI_DOMAIN_SHFT)
+
+/* Command base offset for commands */
+#define HFI_CMD_START_OFFSET            0x10000
+
+/* Command base offset for messages */
+#define HFI_MSG_START_OFFSET            0x20000
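+
+/*
+ * Worked example: an opcode is domain base + command/message offset +
+ * index. HFI_CMD_SYS_INIT below therefore resolves to
+ * (0x0 << 24) + 0x10000 + 0x1 = 0x10001, and HFI_CMD_IPEBPS_CREATE_HANDLE
+ * to (0x1 << 24) + 0x10000 + 0x8 = 0x1010008.
+ */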
+
+/* System Level Error types */
+#define HFI_ERR_SYS_NONE                (HFI_COMMON_BASE)
+#define HFI_ERR_SYS_FATAL               (HFI_COMMON_BASE + 0x1)
+#define HFI_ERR_SYS_VERSION_MISMATCH    (HFI_COMMON_BASE + 0x2)
+#define HFI_ERR_SYS_UNSUPPORTED_DOMAIN  (HFI_COMMON_BASE + 0x3)
+#define HFI_ERR_SYS_UNSUPPORT_CMD       (HFI_COMMON_BASE + 0x4)
+#define HFI_ERR_SYS_CMDFAILED           (HFI_COMMON_BASE + 0x5)
+#define HFI_ERR_SYS_CMDSIZE             (HFI_COMMON_BASE + 0x6)
+
+/* System Level Event types */
+#define HFI_EVENT_SYS_ERROR             (HFI_COMMON_BASE + 0x1)
+#define HFI_EVENT_ICP_ERROR             (HFI_COMMON_BASE + 0x2)
+#define HFI_EVENT_IPE_BPS_ERROR         (HFI_COMMON_BASE + 0x3)
+#define HFI_EVENT_CDM_ERROR             (HFI_COMMON_BASE + 0x4)
+#define HFI_EVENT_DBG_ERROR             (HFI_COMMON_BASE + 0x5)
+
+/* Core level start Ranges for errors */
+#define HFI_ERR_ICP_START               (HFI_COMMON_BASE + 0x64)
+#define HFI_ERR_IPE_BPS_START           (HFI_ERR_ICP_START + 0x64)
+#define HFI_ERR_CDM_START               (HFI_ERR_IPE_BPS_START + 0x64)
+#define HFI_ERR_DBG_START               (HFI_ERR_CDM_START + 0x64)
+
+/* ICP core level error messages */
+#define HFI_ERR_NO_RES                  (HFI_ERR_ICP_START + 0x1)
+#define HFI_ERR_UNSUPPORTED_RES         (HFI_ERR_ICP_START + 0x2)
+#define HFI_ERR_UNSUPPORTED_PROP        (HFI_ERR_ICP_START + 0x3)
+#define HFI_ERR_INIT_EXPECTED           (HFI_ERR_ICP_START + 0x4)
+#define HFI_ERR_INIT_IGNORED            (HFI_ERR_ICP_START + 0x5)
+
+/* System level commands */
+#define HFI_CMD_COMMON_START \
+		(HFI_DOMAIN_BASE_ICP + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_SYS_INIT               (HFI_CMD_COMMON_START + 0x1)
+#define HFI_CMD_SYS_PC_PREP            (HFI_CMD_COMMON_START + 0x2)
+#define HFI_CMD_SYS_SET_PROPERTY       (HFI_CMD_COMMON_START + 0x3)
+#define HFI_CMD_SYS_GET_PROPERTY       (HFI_CMD_COMMON_START + 0x4)
+#define HFI_CMD_SYS_PING               (HFI_CMD_COMMON_START + 0x5)
+#define HFI_CMD_SYS_RESET              (HFI_CMD_COMMON_START + 0x6)
+
+/* Core level commands */
+/* IPE/BPS core Commands */
+#define HFI_CMD_IPE_BPS_COMMON_START \
+		(HFI_DOMAIN_BASE_IPE_BPS + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_IPEBPS_CREATE_HANDLE \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0x8)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0xa)
+#define HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0xe)
+
+/* CDM core Commands */
+#define HFI_CMD_CDM_COMMON_START \
+		(HFI_DOMAIN_BASE_CDM + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_CDM_TEST_START (HFI_CMD_CDM_COMMON_START + 0x800)
+#define HFI_CMD_CDM_END        (HFI_CMD_CDM_COMMON_START + 0xFFF)
+
+/* Debug/Test Commands */
+#define HFI_CMD_DBG_COMMON_START \
+		(HFI_DOMAIN_BASE_DBG + HFI_CMD_START_OFFSET + 0x0)
+#define HFI_CMD_DBG_TEST_START  (HFI_CMD_DBG_COMMON_START + 0x800)
+#define HFI_CMD_DBG_END         (HFI_CMD_DBG_COMMON_START + 0xFFF)
+
+/* System level messages */
+#define HFI_MSG_ICP_COMMON_START \
+		(HFI_DOMAIN_BASE_ICP + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_SYS_INIT_DONE           (HFI_MSG_ICP_COMMON_START + 0x1)
+#define HFI_MSG_SYS_PC_PREP_DONE        (HFI_MSG_ICP_COMMON_START + 0x2)
+#define HFI_MSG_SYS_DEBUG               (HFI_MSG_ICP_COMMON_START + 0x3)
+#define HFI_MSG_SYS_IDLE                (HFI_MSG_ICP_COMMON_START + 0x4)
+#define HFI_MSG_SYS_PROPERTY_INFO       (HFI_MSG_ICP_COMMON_START + 0x5)
+#define HFI_MSG_SYS_PING_ACK            (HFI_MSG_ICP_COMMON_START + 0x6)
+#define HFI_MSG_SYS_RESET_ACK           (HFI_MSG_ICP_COMMON_START + 0x7)
+#define HFI_MSG_EVENT_NOTIFY            (HFI_MSG_ICP_COMMON_START + 0x8)
+
+/* Core level Messages */
+/* IPE/BPS core Messages */
+#define HFI_MSG_IPE_BPS_COMMON_START \
+		(HFI_DOMAIN_BASE_IPE_BPS + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_IPEBPS_CREATE_HANDLE_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x08)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x0a)
+#define HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x0e)
+#define HFI_MSG_IPE_BPS_TEST_START	\
+		(HFI_MSG_IPE_BPS_COMMON_START + 0x800)
+#define HFI_MSG_IPE_BPS_END \
+		(HFI_MSG_IPE_BPS_COMMON_START + 0xFFF)
+
+/* CDM core Messages */
+#define HFI_MSG_CDM_COMMON_START \
+		(HFI_DOMAIN_BASE_CDM + HFI_MSG_START_OFFSET + 0x0)
+#define HFI_MSG_PRI_CDM_PAYLOAD_ACK     (HFI_MSG_CDM_COMMON_START + 0xa)
+#define HFI_MSG_PRI_LLD_PAYLOAD_ACK     (HFI_MSG_CDM_COMMON_START + 0xb)
+#define HFI_MSG_CDM_TEST_START          (HFI_MSG_CDM_COMMON_START + 0x800)
+#define HFI_MSG_CDM_END                 (HFI_MSG_CDM_COMMON_START + 0xFFF)
+
+/* core level test command ranges */
+/* ICP core level test command range */
+#define HFI_CMD_ICP_TEST_START          (HFI_CMD_COMMON_START + 0x800)
+#define HFI_CMD_ICP_END                 (HFI_CMD_COMMON_START + 0xFFF)
+
+/* IPE/BPS core level test command range */
+#define HFI_CMD_IPE_BPS_TEST_START \
+		(HFI_CMD_IPE_BPS_COMMON_START + 0x800)
+#define HFI_CMD_IPE_BPS_END (HFI_CMD_IPE_BPS_COMMON_START + 0xFFF)
+
+/* ICP core level test message range */
+#define HFI_MSG_ICP_TEST_START  (HFI_MSG_ICP_COMMON_START + 0x800)
+#define HFI_MSG_ICP_END         (HFI_MSG_ICP_COMMON_START + 0xFFF)
+
+/* ICP core level Debug test message range */
+#define HFI_MSG_DBG_COMMON_START \
+		(HFI_DOMAIN_BASE_DBG + 0x0)
+#define HFI_MSG_DBG_TEST_START  (HFI_MSG_DBG_COMMON_START + 0x800)
+#define HFI_MSG_DBG_END         (HFI_MSG_DBG_COMMON_START + 0xFFF)
+
+/* System level property base offset */
+#define HFI_PROPERTY_ICP_COMMON_START  (HFI_DOMAIN_BASE_ICP + 0x0)
+
+#define HFI_PROP_SYS_DEBUG_CFG         (HFI_PROPERTY_ICP_COMMON_START + 0x1)
+#define HFI_PROP_SYS_UBWC_CFG          (HFI_PROPERTY_ICP_COMMON_START + 0x2)
+#define HFI_PROP_SYS_IMAGE_VER         (HFI_PROPERTY_ICP_COMMON_START + 0x3)
+#define HFI_PROP_SYS_SUPPORTED         (HFI_PROPERTY_ICP_COMMON_START + 0x4)
+#define HFI_PROP_SYS_IPEBPS_PC         (HFI_PROPERTY_ICP_COMMON_START + 0x5)
+#define HFI_PROP_SYS_FW_DUMP_CFG       (HFI_PROPERTY_ICP_COMMON_START + 0x8)
+
+/* Capabilities reported at sys init */
+#define HFI_CAPS_PLACEHOLDER_1         (HFI_COMMON_BASE + 0x1)
+#define HFI_CAPS_PLACEHOLDER_2         (HFI_COMMON_BASE + 0x2)
+
+/* Section describes different debug levels (HFI_DEBUG_MSG_X)
+ * available for debug messages from FW
+ */
+#define  HFI_DEBUG_MSG_LOW      0x00000001
+#define  HFI_DEBUG_MSG_MEDIUM   0x00000002
+#define  HFI_DEBUG_MSG_HIGH     0x00000004
+#define  HFI_DEBUG_MSG_ERROR    0x00000008
+#define  HFI_DEBUG_MSG_FATAL    0x00000010
+/* Messages containing performance data */
+#define  HFI_DEBUG_MSG_PERF     0x00000020
+/* Disable ARM9 WFI in low power mode. */
+#define  HFI_DEBUG_CFG_WFI      0x01000000
+/* Disable ARM9 watchdog. */
+#define  HFI_DEBUG_CFG_ARM9WD   0x10000000
+
+
+/*
+ * HFI_FW_DUMP levels
+ * HFI_FW_DUMP_xx
+ */
+#define HFI_FW_DUMP_DISABLED    0x00000000
+#define HFI_FW_DUMP_ON_FAILURE  0x00000001
+#define HFI_FW_DUMP_ALWAYS      0x00000002
+
+/* Number of available dump levels. */
+#define NUM_HFI_DUMP_LVL        0x00000003
+
+/* Debug Msg Communication types:
+ * Section describes different modes (HFI_DEBUG_MODE_X)
+ * available to communicate the debug messages
+ */
+ /* Debug message output through the interface debug queue. */
+#define HFI_DEBUG_MODE_QUEUE     0x00000001
+ /* Debug message output through QDSS. */
+#define HFI_DEBUG_MODE_QDSS      0x00000002
+ /* Number of debug modes available. */
+#define NUM_HFI_DEBUG_MODE       0x00000002
+
+#define HFI_DEV_VERSION_MAX      0x5
+
+/*
+ * start of sys command packet types
+ * These commands are used to get system level information
+ * from firmware
+ */
+
+/**
+ * struct hfi_caps_support
+ * payload to report caps through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @type: capability type
+ * @min: minimum supported value for the capability
+ * @max: maximum supported value for the capability
+ * @step_size: supported steps between min-max
+ */
+struct hfi_caps_support {
+	uint32_t type;
+	uint32_t min;
+	uint32_t max;
+	uint32_t step_size;
+} __packed;
+
+/**
+ * struct hfi_caps_support_info
+ * capability report through HFI_PROPERTY_PARAM_CAPABILITY_SUPPORTED
+ * @num_caps: number of capabilities listed
+ * @caps_data: capabilities info array
+ */
+struct hfi_caps_support_info {
+	uint32_t num_caps;
+	struct hfi_caps_support caps_data[1];
+} __packed;
+
+/**
+ * struct hfi_debug
+ * payload structure to configure HFI_PROPERTY_SYS_DEBUG_CONFIG
+ * @debug_config: bitwise OR of HFI_DEBUG_MSG_X values specifying the
+ *                debug message types to output
+ * @debug_mode: debug message output through debug queue/qdss
+ * @HFI_PROPERTY_SYS_DEBUG_CONFIG
+ */
+struct hfi_debug {
+	uint32_t debug_config;
+	uint32_t debug_mode;
+} __packed;
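+
+/*
+ * Illustrative configuration sketch: debug levels are OR-ed into
+ * debug_config and an output mode is selected, e.g. errors and fatals
+ * routed through the debug queue:
+ *
+ *	struct hfi_debug dbg = {
+ *		.debug_config = HFI_DEBUG_MSG_ERROR | HFI_DEBUG_MSG_FATAL,
+ *		.debug_mode = HFI_DEBUG_MODE_QUEUE,
+ *	};
+ */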
+
+/**
+ * struct hfi_ipe_bps_pc
+ * payload structure to configure HFI_PROPERTY_SYS_IPEBPS_PC
+ * @enable: Flag to enable IPE, BPS interframe power collapse
+ * @core_info: Core information to firmware
+ */
+struct hfi_ipe_bps_pc {
+	uint32_t enable;
+	uint32_t core_info;
+} __packed;
+
+/**
+ * struct hfi_cmd_ubwc_cfg
+ * Payload structure to configure HFI_PROP_SYS_UBWC_CFG
+ * @ubwc_fetch_cfg: UBWC configuration for fetch
+ * @ubwc_write_cfg: UBWC configuration for write
+ */
+struct hfi_cmd_ubwc_cfg {
+	uint32_t ubwc_fetch_cfg;
+	uint32_t ubwc_write_cfg;
+};
+
+/**
+ * struct hfi_cmd_sys_init
+ * command to initialize a system session
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_INIT
+ */
+struct hfi_cmd_sys_init {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_pc_prep
+ * command to firmware to prepare for power collapse
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @HFI_CMD_SYS_PC_PREP
+ */
+struct hfi_cmd_pc_prep {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_cmd_prop
+ * command to get/set properties of firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of properties queried/set
+ * @prop_data: for get: array of property IDs being queried, sized by
+ *             num_prop; for set: array of property ID and structure pairs
+ * @HFI_CMD_SYS_GET_PROPERTY
+ * @HFI_CMD_SYS_SET_PROPERTY
+ */
+struct hfi_cmd_prop {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_cmd_ping_pkt
+ * ping command pings the firmware to confirm whether
+ * it is alive.
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ *             as part of HFI_MSG_SYS_PING_ACK
+ * @HFI_CMD_SYS_PING
+ */
+struct hfi_cmd_ping_pkt {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_cmd_sys_reset_pkt
+ * sends the reset command to FW. FW responds with a packet of the same
+ * type, so this structure can also serve as the reset ack packet
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: client data, firmware returns this data
+ *             as part of HFI_MSG_SYS_RESET_ACK
+ * @HFI_CMD_SYS_RESET
+ */
+struct hfi_cmd_sys_reset_pkt {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/* end of sys command packet types */
+
+/* start of sys message packet types */
+
+/**
+ * struct hfi_prop
+ * structure to report maximum supported features of firmware.
+ */
+struct hfi_sys_support {
+	uint32_t place_holder;
+} __packed;
+
+/**
+ * struct hfi_supported_prop
+ * structure to report HFI_PROPERTY_PARAM_PROPERTIES_SUPPORTED
+ * for a session
+ * @num_prop: number of properties supported
+ * @prop_data: array of supported property IDs
+ */
+struct hfi_supported_prop {
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_image_version
+ * system image version
+ * @major: major version number
+ * @minor: minor version number
+ * @ver_name_size: size of version name
+ * @ver_name: image version name
+ */
+struct hfi_image_version {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t ver_name_size;
+	uint8_t  ver_name[1];
+} __packed;
+
+/**
+ * struct hfi_msg_init_done_data
+ * @api_ver:    Firmware API version
+ * @dev_ver:    Device version
+ * @num_icp_hw: Number of ICP hardware information
+ * @dev_hw_ver: Supported hardware version information
+ * @reserved:   Reserved field
+ */
+struct hfi_msg_init_done_data {
+	uint32_t api_ver;
+	uint32_t dev_ver;
+	uint32_t num_icp_hw;
+	uint32_t dev_hw_ver[HFI_DEV_VERSION_MAX];
+	uint32_t reserved;
+};
+
+/**
+ * struct hfi_msg_init_done
+ * system init done message from firmware. Many system level properties
+ * are returned with the packet
+ * @size:      Packet size in bytes
+ * @pkt_type:  Opcode of a packet
+ * @err_type:  Error code associated with response
+ * @num_prop:  Number of default capability info
+ * @prop_data: Array of property ids and corresponding structure pairs
+ */
+struct hfi_msg_init_done {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_pc_prep_done
+ * system power collapse preparation done message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @err_type: error code associated with the response
+ */
+struct hfi_msg_pc_prep_done {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t err_type;
+} __packed;
+
+/**
+ * struct hfi_msg_prop
+ * system property info from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @num_prop: number of property info structures
+ * @prop_data: array of property IDs and associated structure pairs
+ */
+struct hfi_msg_prop {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t num_prop;
+	uint32_t prop_data[1];
+} __packed;
+
+/**
+ * struct hfi_msg_idle
+ * system idle message from firmware
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ */
+struct hfi_msg_idle {
+	uint32_t size;
+	uint32_t pkt_type;
+} __packed;
+
+/**
+ * struct hfi_msg_ping_ack
+ * system ping ack message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @user_data: this data is sent as part of ping command from host
+ */
+struct hfi_msg_ping_ack {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint64_t user_data;
+} __packed;
+
+/**
+ * struct hfi_msg_debug
+ * system debug message definition
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @msg_type: debug message type
+ * @msg_size: size of debug message in bytes
+ * @timestamp_hi: most significant 32 bits of the 64 bit timestamp field.
+ *                timestamp shall be interpreted as a signed 64-bit value
+ *                representing microseconds.
+ * @timestamp_lo: least significant 32 bits of the 64 bit timestamp field.
+ *                timestamp shall be interpreted as a signed 64-bit value
+ *                representing microseconds.
+ * @msg_data: message data in string form
+ */
+struct hfi_msg_debug {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t msg_type;
+	uint32_t msg_size;
+	uint32_t timestamp_hi;
+	uint32_t timestamp_lo;
+	uint8_t  msg_data[1];
+} __packed;
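+
+/*
+ * Illustrative sketch: the split timestamp halves reassemble into a signed
+ * 64-bit microsecond value:
+ *
+ *	int64_t ts_us = (int64_t)(((uint64_t)msg->timestamp_hi << 32) |
+ *		msg->timestamp_lo);
+ */
+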
+/**
+ * struct hfi_msg_event_notify
+ * event notify message
+ * @size: packet size in bytes
+ * @pkt_type: opcode of a packet
+ * @fw_handle: firmware session handle
+ * @event_id: session event id
+ * @event_data1: event data corresponding to event ID
+ * @event_data2: event data corresponding to event ID
+ * @ext_event_data: info array, interpreted based on event_data1
+ * and event_data2
+ */
+struct hfi_msg_event_notify {
+	uint32_t size;
+	uint32_t pkt_type;
+	uint32_t fw_handle;
+	uint32_t event_id;
+	uint32_t event_data1;
+	uint32_t event_data2;
+	uint32_t ext_event_data[1];
+} __packed;
+
+/* end of sys message packet types */
+
+#endif /* _HFI_DEFS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_icp/hfi.c b/drivers/media/platform/msm/camera/cam_icp/hfi.c
new file mode 100644
index 0000000..e3b292d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/hfi.c
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <asm/errno.h>
+#include <linux/timer.h>
+#include <media/cam_icp.h>
+#include <linux/iopoll.h>
+#include <soc/qcom/socinfo.h>
+
+#include "cam_io_util.h"
+#include "hfi_reg.h"
+#include "hfi_sys_defs.h"
+#include "hfi_session_defs.h"
+#include "hfi_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+
+#define HFI_VERSION_INFO_MAJOR_VAL  1
+#define HFI_VERSION_INFO_MINOR_VAL  1
+#define HFI_VERSION_INFO_STEP_VAL   0
+#define HFI_VERSION_INFO_MAJOR_BMSK  0xFF000000
+#define HFI_VERSION_INFO_MAJOR_SHFT  24
+#define HFI_VERSION_INFO_MINOR_BMSK  0xFFFF00
+#define HFI_VERSION_INFO_MINOR_SHFT  8
+#define HFI_VERSION_INFO_STEP_BMSK   0xFF
+#define HFI_VERSION_INFO_STEP_SHFT  0
+
+#define HFI_MAX_POLL_TRY 5
+
+static struct hfi_info *g_hfi;
+unsigned int g_icp_mmu_hdl;
+static DEFINE_MUTEX(hfi_cmd_q_mutex);
+static DEFINE_MUTEX(hfi_msg_q_mutex);
+
+void cam_hfi_queue_dump(void)
+{
+	struct hfi_qtbl *qtbl;
+	struct hfi_qtbl_hdr *qtbl_hdr;
+	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr;
+	struct hfi_mem_info *hfi_mem = NULL;
+	uint32_t *read_q, *read_ptr;
+	int i;
+
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "Unable to dump queues, hfi info is NULL");
+		return;
+	}
+
+	hfi_mem = &g_hfi->map;
+
+	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+	qtbl_hdr = &qtbl->q_tbl_hdr;
+	CAM_DBG(CAM_HFI,
+		"qtbl: version = %x size = %u num q = %u qhdr_size = %u",
+		qtbl_hdr->qtbl_version, qtbl_hdr->qtbl_size,
+		qtbl_hdr->qtbl_num_q, qtbl_hdr->qtbl_qhdr_size);
+
+	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+	CAM_DBG(CAM_HFI, "cmd: size = %u r_idx = %u w_idx = %u addr = %x",
+		cmd_q_hdr->qhdr_q_size, cmd_q_hdr->qhdr_read_idx,
+		cmd_q_hdr->qhdr_write_idx, hfi_mem->cmd_q.iova);
+	read_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+	read_ptr = (uint32_t *)(read_q + 0);
+	CAM_DBG(CAM_HFI, "CMD Q START");
+	for (i = 0; i < ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+		CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+
+	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+	CAM_DBG(CAM_HFI, "msg: size = %u r_idx = %u w_idx = %u addr = %x",
+		msg_q_hdr->qhdr_q_size, msg_q_hdr->qhdr_read_idx,
+		msg_q_hdr->qhdr_write_idx, hfi_mem->msg_q.iova);
+	read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+	read_ptr = (uint32_t *)(read_q + 0);
+	CAM_DBG(CAM_HFI, "MSG Q START");
+	for (i = 0; i < ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT; i++)
+		CAM_DBG(CAM_HFI, "Word: %d Data: 0x%08x ", i, read_ptr[i]);
+}
+
+int hfi_write_cmd(void *cmd_ptr)
+{
+	uint32_t size_in_words, empty_space, new_write_idx, read_idx, temp;
+	uint32_t *write_q, *write_ptr;
+	struct hfi_qtbl *q_tbl;
+	struct hfi_q_hdr *q;
+	int rc = 0;
+
+	if (!cmd_ptr) {
+		CAM_ERR(CAM_HFI, "command is null");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hfi_cmd_q_mutex);
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "HFI interface not setup");
+		rc = -ENODEV;
+		goto err;
+	}
+
+	if (g_hfi->hfi_state != HFI_READY ||
+		!g_hfi->cmd_q_state) {
+		CAM_ERR(CAM_HFI, "HFI state: %u, cmd q state: %u",
+			g_hfi->hfi_state, g_hfi->cmd_q_state);
+		rc = -ENODEV;
+		goto err;
+	}
+
+	q_tbl = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+	q = &q_tbl->q_hdr[Q_CMD];
+
+	write_q = (uint32_t *)g_hfi->map.cmd_q.kva;
+
+	size_in_words = (*(uint32_t *)cmd_ptr) >> BYTE_WORD_SHIFT;
+	if (!size_in_words) {
+		CAM_DBG(CAM_HFI, "failed");
+		rc = -EINVAL;
+		goto err;
+	}
+
+	read_idx = q->qhdr_read_idx;
+	empty_space = (q->qhdr_write_idx >= read_idx) ?
+		(q->qhdr_q_size - (q->qhdr_write_idx - read_idx)) :
+		(read_idx - q->qhdr_write_idx);
+	if (empty_space <= size_in_words) {
+		CAM_ERR(CAM_HFI, "failed: empty space %u, size_in_words %u",
+			empty_space, size_in_words);
+		rc = -EIO;
+		goto err;
+	}
+
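+	/*
+	 * Copy the packet into the queue, splitting the copy at the
+	 * queue boundary when the packet wraps past the end.
+	 */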
+	new_write_idx = q->qhdr_write_idx + size_in_words;
+	write_ptr = (uint32_t *)(write_q + q->qhdr_write_idx);
+
+	if (new_write_idx < q->qhdr_q_size) {
+		memcpy(write_ptr, (uint8_t *)cmd_ptr,
+			size_in_words << BYTE_WORD_SHIFT);
+	} else {
+		new_write_idx -= q->qhdr_q_size;
+		temp = (size_in_words - new_write_idx) << BYTE_WORD_SHIFT;
+		memcpy(write_ptr, (uint8_t *)cmd_ptr, temp);
+		memcpy(write_q, (uint8_t *)cmd_ptr + temp,
+			new_write_idx << BYTE_WORD_SHIFT);
+	}
+
+	/*
+	 * Make sure the command data is committed to the command queue
+	 * before updating the write index.
+	 */
+	wmb();
+
+	q->qhdr_write_idx = new_write_idx;
+
+	/*
+	 * Before raising interrupt make sure command data is ready for
+	 * firmware to process
+	 */
+	wmb();
+	cam_io_w_mb((uint32_t)INTR_ENABLE,
+		g_hfi->csr_base + HFI_REG_A5_CSR_HOST2ICPINT);
+err:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	return rc;
+}
+
+int hfi_read_message(uint32_t *pmsg, uint8_t q_id,
+	uint32_t *words_read)
+{
+	struct hfi_qtbl *q_tbl_ptr;
+	struct hfi_q_hdr *q;
+	uint32_t new_read_idx, size_in_words, word_diff, temp;
+	uint32_t *read_q, *read_ptr, *write_ptr;
+	uint32_t size_upper_bound = 0;
+	int rc = 0;
+
+	if (!pmsg) {
+		CAM_ERR(CAM_HFI, "Invalid msg");
+		return -EINVAL;
+	}
+
+	if (q_id > Q_DBG) {
+		CAM_ERR(CAM_HFI, "Invalid q :%u", q_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hfi_msg_q_mutex);
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "hfi not set up yet");
+		rc = -ENODEV;
+		goto err;
+	}
+
+	if ((g_hfi->hfi_state != HFI_READY) ||
+		!g_hfi->msg_q_state) {
+		CAM_ERR(CAM_HFI, "hfi state: %u, msg q state: %u",
+			g_hfi->hfi_state, g_hfi->msg_q_state);
+		rc = -ENODEV;
+		goto err;
+	}
+
+	q_tbl_ptr = (struct hfi_qtbl *)g_hfi->map.qtbl.kva;
+	q = &q_tbl_ptr->q_hdr[q_id];
+
+	if (q->qhdr_read_idx == q->qhdr_write_idx) {
+		CAM_DBG(CAM_HFI, "Q not ready, state:%u, r idx:%u, w idx:%u",
+			g_hfi->hfi_state, q->qhdr_read_idx, q->qhdr_write_idx);
+		rc = -EIO;
+		goto err;
+	}
+
+	if (q_id == Q_MSG) {
+		read_q = (uint32_t *)g_hfi->map.msg_q.kva;
+		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_MSGQ_IN_WORDS;
+	} else {
+		read_q = (uint32_t *)g_hfi->map.dbg_q.kva;
+		size_upper_bound = ICP_HFI_MAX_PKT_SIZE_IN_WORDS;
+	}
+
+	read_ptr = (uint32_t *)(read_q + q->qhdr_read_idx);
+	write_ptr = (uint32_t *)(read_q + q->qhdr_write_idx);
+
+	if (write_ptr > read_ptr)
+		size_in_words = write_ptr - read_ptr;
+	else {
+		word_diff = read_ptr - write_ptr;
+		if (q_id == Q_MSG)
+			size_in_words = (ICP_MSG_Q_SIZE_IN_BYTES >>
+			BYTE_WORD_SHIFT) - word_diff;
+		else
+			size_in_words = (ICP_DBG_Q_SIZE_IN_BYTES >>
+			BYTE_WORD_SHIFT) - word_diff;
+	}
+
+	if ((size_in_words == 0) ||
+		(size_in_words > size_upper_bound)) {
+		CAM_ERR(CAM_HFI, "invalid HFI message packet size - 0x%08x",
+			size_in_words << BYTE_WORD_SHIFT);
+		q->qhdr_read_idx = q->qhdr_write_idx;
+		rc = -EIO;
+		goto err;
+	}
+
+	new_read_idx = q->qhdr_read_idx + size_in_words;
+
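+	/* Copy the packet out, wrapping around the queue end if needed */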
+	if (new_read_idx < q->qhdr_q_size) {
+		memcpy(pmsg, read_ptr, size_in_words << BYTE_WORD_SHIFT);
+	} else {
+		new_read_idx -= q->qhdr_q_size;
+		temp = (size_in_words - new_read_idx) << BYTE_WORD_SHIFT;
+		memcpy(pmsg, read_ptr, temp);
+		memcpy((uint8_t *)pmsg + temp, read_q,
+			new_read_idx << BYTE_WORD_SHIFT);
+	}
+
+	q->qhdr_read_idx = new_read_idx;
+	*words_read = size_in_words;
+	/*
+	 * Memory barrier to make sure the message queue parameters
+	 * are updated after the read.
+	 */
+	wmb();
+err:
+	mutex_unlock(&hfi_msg_q_mutex);
+	return rc;
+}
+
+int hfi_cmd_ubwc_config(uint32_t *ubwc_cfg)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0;
+	int rc;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_cmd_ubwc_cfg);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_UBWC_CFG;
+	dbg_prop->prop_data[1] = ubwc_cfg[0];
+	dbg_prop->prop_data[2] = ubwc_cfg[1];
+
+	rc = hfi_write_cmd(prop);
+	kfree(prop);
+
+	return rc;
+}
+
+int hfi_enable_ipe_bps_pc(bool enable, uint32_t core_info)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0;
+	int rc;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_ipe_bps_pc);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_IPEBPS_PC;
+	dbg_prop->prop_data[1] = enable;
+	dbg_prop->prop_data[2] = core_info;
+
+	rc = hfi_write_cmd(prop);
+	kfree(prop);
+
+	return rc;
+}
+
+int hfi_set_debug_level(u64 a5_dbg_type, uint32_t lvl)
+{
+	uint8_t *prop;
+	struct hfi_cmd_prop *dbg_prop;
+	uint32_t size = 0, val;
+	int rc;
+
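+	/*
+	 * val is the bitwise OR of every supported debug flag; any level
+	 * numerically greater than this combined mask is rejected.
+	 */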
+	val = HFI_DEBUG_MSG_LOW |
+		HFI_DEBUG_MSG_MEDIUM |
+		HFI_DEBUG_MSG_HIGH |
+		HFI_DEBUG_MSG_ERROR |
+		HFI_DEBUG_MSG_FATAL |
+		HFI_DEBUG_MSG_PERF |
+		HFI_DEBUG_CFG_WFI |
+		HFI_DEBUG_CFG_ARM9WD;
+
+	if (lvl > val)
+		return -EINVAL;
+
+	size = sizeof(struct hfi_cmd_prop) +
+		sizeof(struct hfi_debug);
+
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	dbg_prop = (struct hfi_cmd_prop *)prop;
+	dbg_prop->size = size;
+	dbg_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	dbg_prop->num_prop = 1;
+	dbg_prop->prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+	dbg_prop->prop_data[1] = lvl;
+	dbg_prop->prop_data[2] = a5_dbg_type;
+	rc = hfi_write_cmd(prop);
+
+	kfree(prop);
+
+	return rc;
+}
+
+int hfi_set_fw_dump_level(uint32_t lvl)
+{
+	uint8_t *prop = NULL;
+	struct hfi_cmd_prop *fw_dump_level_switch_prop = NULL;
+	uint32_t size = 0;
+	int rc;
+
+	CAM_DBG(CAM_HFI, "fw dump ENTER");
+
+	size = sizeof(struct hfi_cmd_prop) + sizeof(lvl);
+	prop = kzalloc(size, GFP_KERNEL);
+	if (!prop)
+		return -ENOMEM;
+
+	fw_dump_level_switch_prop = (struct hfi_cmd_prop *)prop;
+	fw_dump_level_switch_prop->size = size;
+	fw_dump_level_switch_prop->pkt_type = HFI_CMD_SYS_SET_PROPERTY;
+	fw_dump_level_switch_prop->num_prop = 1;
+	fw_dump_level_switch_prop->prop_data[0] = HFI_PROP_SYS_FW_DUMP_CFG;
+	fw_dump_level_switch_prop->prop_data[1] = lvl;
+
+	CAM_DBG(CAM_HFI, "prop->size = %d\n"
+			 "prop->pkt_type = %d\n"
+			 "prop->num_prop = %d\n"
+			 "prop->prop_data[0] = %d\n"
+			 "prop->prop_data[1] = %d\n",
+			 fw_dump_level_switch_prop->size,
+			 fw_dump_level_switch_prop->pkt_type,
+			 fw_dump_level_switch_prop->num_prop,
+			 fw_dump_level_switch_prop->prop_data[0],
+			 fw_dump_level_switch_prop->prop_data[1]);
+
+	rc = hfi_write_cmd(prop);
+	kfree(prop);
+	return rc;
+}
+
+void hfi_send_system_cmd(uint32_t type, uint64_t data, uint32_t size)
+{
+	switch (type) {
+	case HFI_CMD_SYS_INIT: {
+		struct hfi_cmd_sys_init init;
+
+		memset(&init, 0, sizeof(init));
+
+		init.size = sizeof(struct hfi_cmd_sys_init);
+		init.pkt_type = type;
+		hfi_write_cmd(&init);
+	}
+		break;
+	case HFI_CMD_SYS_PC_PREP: {
+		struct hfi_cmd_pc_prep prep;
+
+		prep.size = sizeof(struct hfi_cmd_pc_prep);
+		prep.pkt_type = type;
+		hfi_write_cmd(&prep);
+	}
+		break;
+	case HFI_CMD_SYS_SET_PROPERTY: {
+		struct hfi_cmd_prop prop;
+
+		if ((uint32_t)data == (uint32_t)HFI_PROP_SYS_DEBUG_CFG) {
+			prop.size = sizeof(struct hfi_cmd_prop);
+			prop.pkt_type = type;
+			prop.num_prop = 1;
+			prop.prop_data[0] = HFI_PROP_SYS_DEBUG_CFG;
+			hfi_write_cmd(&prop);
+		}
+	}
+		break;
+	case HFI_CMD_SYS_GET_PROPERTY:
+		break;
+	case HFI_CMD_SYS_PING: {
+		struct hfi_cmd_ping_pkt ping;
+
+		ping.size = sizeof(struct hfi_cmd_ping_pkt);
+		ping.pkt_type = type;
+		ping.user_data = (uint64_t)data;
+		hfi_write_cmd(&ping);
+	}
+		break;
+	case HFI_CMD_SYS_RESET: {
+		struct hfi_cmd_sys_reset_pkt reset;
+
+		reset.size = sizeof(struct hfi_cmd_sys_reset_pkt);
+		reset.pkt_type = type;
+		reset.user_data = (uint64_t)data;
+		hfi_write_cmd(&reset);
+	}
+		break;
+	case HFI_CMD_IPEBPS_CREATE_HANDLE: {
+		struct hfi_cmd_create_handle handle;
+
+		handle.size = sizeof(struct hfi_cmd_create_handle);
+		handle.pkt_type = type;
+		handle.handle_type = (uint32_t)data;
+		handle.user_data1 = 0;
+		hfi_write_cmd(&handle);
+	}
+		break;
+	case HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT:
+		break;
+	default:
+		CAM_ERR(CAM_HFI, "command not supported :%d", type);
+		break;
+	}
+}
+
+int hfi_get_hw_caps(void *query_buf)
+{
+	int i = 0;
+	struct cam_icp_query_cap_cmd *query_cmd = NULL;
+
+	if (!query_buf) {
+		CAM_ERR(CAM_HFI, "query buf is NULL");
+		return -EINVAL;
+	}
+
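+	/* Fill in fixed default capability values (not queried from firmware) */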
+	query_cmd = (struct cam_icp_query_cap_cmd *)query_buf;
+	query_cmd->fw_version.major = 0x12;
+	query_cmd->fw_version.minor = 0x12;
+	query_cmd->fw_version.revision = 0x12;
+
+	query_cmd->api_version.major = 0x13;
+	query_cmd->api_version.minor = 0x13;
+	query_cmd->api_version.revision = 0x13;
+
+	query_cmd->num_ipe = 2;
+	query_cmd->num_bps = 1;
+
+	for (i = 0; i < CAM_ICP_DEV_TYPE_MAX; i++) {
+		query_cmd->dev_ver[i].dev_type = i;
+		query_cmd->dev_ver[i].hw_ver.major = 0x34 + i;
+		query_cmd->dev_ver[i].hw_ver.minor = 0x34 + i;
+		query_cmd->dev_ver[i].hw_ver.incr = 0x34 + i;
+	}
+	return 0;
+}
+
+void cam_hfi_disable_cpu(void __iomem *icp_base)
+{
+	uint32_t data;
+	uint32_t val;
+	uint32_t try = 0;
+
+	while (try < HFI_MAX_POLL_TRY) {
+		data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+		CAM_DBG(CAM_HFI, "wfi status = %x\n", (int)data);
+
+		if (data & ICP_CSR_A5_STATUS_WFI)
+			break;
+		/*
+		 * Poll to confirm that the FW has triggered WFI so the
+		 * host can proceed. No interrupt is expected from FW at
+		 * this time.
+		 */
+		msleep(100);
+		try++;
+	}
+
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	val &= ~(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN);
+	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	val = cam_io_r(icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+	cam_io_w_mb(val, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+
+	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_RESET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+	cam_io_w_mb((uint32_t)INTR_DISABLE,
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+}
+
+void cam_hfi_enable_cpu(void __iomem *icp_base)
+{
+	cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	cam_io_w_mb((uint32_t)0x10, icp_base + HFI_REG_A5_CSR_NSEC_RESET);
+}
+
+int cam_hfi_resume(struct hfi_mem_info *hfi_mem,
+	void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	uint32_t data;
+	uint32_t fw_version, status = 0;
+	uint32_t retry_cnt = 0;
+
+	cam_hfi_enable_cpu(icp_base);
+	g_hfi->csr_base = icp_base;
+
+	if (debug) {
+		cam_io_w_mb(ICP_FLAG_A5_CTRL_DBG_EN,
+			(icp_base + HFI_REG_A5_CSR_A5_CONTROL));
+
+		/*
+		 * Barrier needed as the next write should only be done
+		 * after the previous write has completed. The next write
+		 * enables clock gating.
+		 */
+		wmb();
+
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+
+	} else {
+		cam_io_w_mb((uint32_t)ICP_FLAG_A5_CTRL_EN,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
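+	/*
+	 * Poll the init response register until the firmware reports
+	 * success; an explicit failure aborts immediately, and staying
+	 * in reset past the retry limit is treated as an error below.
+	 */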
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+		CAM_DBG(CAM_HFI, "1: status = %u", status);
+		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+		CAM_DBG(CAM_HFI, "2: status = %u", status);
+		if (status == ICP_INIT_RESP_SUCCESS)
+			break;
+
+		if (status == ICP_INIT_RESP_FAILED) {
+			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+				status);
+			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+			return -EINVAL;
+		}
+		retry_cnt++;
+	}
+
+	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+		(status == ICP_INIT_RESP_RESET)) {
+		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+				status);
+		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+		CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+		return -EINVAL;
+	}
+
+	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	CAM_DBG(CAM_HFI, "fw version : [%x]", fw_version);
+
+	data = cam_io_r(icp_base + HFI_REG_A5_CSR_A5_STATUS);
+	CAM_DBG(CAM_HFI, "wfi status = %x", (int)data);
+
+	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova, icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
+		icp_base + HFI_REG_SFR_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
+		icp_base + HFI_REG_QDSS_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
+		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+
+	return rc;
+}
+
+int cam_hfi_init(uint8_t event_driven_mode, struct hfi_mem_info *hfi_mem,
+		void __iomem *icp_base, bool debug)
+{
+	int rc = 0;
+	struct hfi_qtbl *qtbl;
+	struct hfi_qtbl_hdr *qtbl_hdr;
+	struct hfi_q_hdr *cmd_q_hdr, *msg_q_hdr, *dbg_q_hdr;
+	uint32_t hw_version, soc_version, fw_version, status = 0;
+	uint32_t retry_cnt = 0;
+	struct sfr_buf *sfr_buffer;
+
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
+	if (!g_hfi) {
+		g_hfi = kzalloc(sizeof(struct hfi_info), GFP_KERNEL);
+		if (!g_hfi) {
+			rc = -ENOMEM;
+			goto alloc_fail;
+		}
+	}
+
+	if (g_hfi->hfi_state != HFI_DEINIT) {
+		CAM_ERR(CAM_HFI, "hfi_init: invalid state");
+		rc = -EINVAL;
+		goto alloc_fail;
+	}
+
+	memcpy(&g_hfi->map, hfi_mem, sizeof(g_hfi->map));
+	g_hfi->hfi_state = HFI_DEINIT;
+	soc_version = socinfo_get_version();
+	if (debug) {
+		cam_io_w_mb(
+		(uint32_t)(ICP_FLAG_CSR_A5_EN | ICP_FLAG_CSR_WAKE_UP_EN |
+		ICP_CSR_EDBGRQ | ICP_CSR_DBGSWENABLE),
+		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+		msleep(100);
+		cam_io_w_mb((uint32_t)(ICP_FLAG_CSR_A5_EN |
+		ICP_FLAG_CSR_WAKE_UP_EN | ICP_CSR_EN_CLKGATE_WFI),
+		icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	} else {
+		/*
+		 * Due to a hardware bug in v1, ICP clock gating has to be
+		 * disabled; this is supposed to be fixed in v2. However,
+		 * enabling clock gating causes the firmware to hang, so it
+		 * is disabled on both v1 and v2 until the hardware team
+		 * root-causes the issue.
+		 */
+		cam_io_w_mb((uint32_t)ICP_FLAG_CSR_A5_EN |
+			ICP_FLAG_CSR_WAKE_UP_EN |
+			ICP_CSR_EN_CLKGATE_WFI,
+			icp_base + HFI_REG_A5_CSR_A5_CONTROL);
+	}
+
+	qtbl = (struct hfi_qtbl *)hfi_mem->qtbl.kva;
+	qtbl_hdr = &qtbl->q_tbl_hdr;
+	qtbl_hdr->qtbl_version = 0xFFFFFFFF;
+	qtbl_hdr->qtbl_size = sizeof(struct hfi_qtbl);
+	qtbl_hdr->qtbl_qhdr0_offset = sizeof(struct hfi_qtbl_hdr);
+	qtbl_hdr->qtbl_qhdr_size = sizeof(struct hfi_q_hdr);
+	qtbl_hdr->qtbl_num_q = ICP_HFI_NUMBER_OF_QS;
+	qtbl_hdr->qtbl_num_active_q = ICP_HFI_NUMBER_OF_QS;
+
+	/* setup host-to-firmware command queue */
+	cmd_q_hdr = &qtbl->q_hdr[Q_CMD];
+	cmd_q_hdr->qhdr_status = QHDR_ACTIVE;
+	cmd_q_hdr->qhdr_start_addr = hfi_mem->cmd_q.iova;
+	cmd_q_hdr->qhdr_q_size = ICP_CMD_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	cmd_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	cmd_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	cmd_q_hdr->qhdr_read_idx = RESET;
+	cmd_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-host message queue */
+	msg_q_hdr = &qtbl->q_hdr[Q_MSG];
+	msg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	msg_q_hdr->qhdr_start_addr = hfi_mem->msg_q.iova;
+	msg_q_hdr->qhdr_q_size = ICP_MSG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	msg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	msg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	msg_q_hdr->qhdr_read_idx = RESET;
+	msg_q_hdr->qhdr_write_idx = RESET;
+
+	/* setup firmware-to-host debug queue */
+	dbg_q_hdr = &qtbl->q_hdr[Q_DBG];
+	dbg_q_hdr->qhdr_status = QHDR_ACTIVE;
+	dbg_q_hdr->qhdr_start_addr = hfi_mem->dbg_q.iova;
+	dbg_q_hdr->qhdr_q_size = ICP_DBG_Q_SIZE_IN_BYTES >> BYTE_WORD_SHIFT;
+	dbg_q_hdr->qhdr_pkt_size = ICP_HFI_VAR_SIZE_PKT;
+	dbg_q_hdr->qhdr_pkt_drop_cnt = RESET;
+	dbg_q_hdr->qhdr_read_idx = RESET;
+	dbg_q_hdr->qhdr_write_idx = RESET;
+
+	sfr_buffer = (struct sfr_buf *)hfi_mem->sfr_buf.kva;
+	sfr_buffer->size = ICP_MSG_SFR_SIZE_IN_BYTES;
+
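+	/*
+	 * Configure how queue events are signalled: INTR_MODE arms the
+	 * rx/tx request bits for interrupt driven operation, POLL_MODE
+	 * marks the queues for host polling and WM_MODE generates events
+	 * based on the rx/tx watermarks.
+	 */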
+	switch (event_driven_mode) {
+	case INTR_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = SET;
+		cmd_q_hdr->qhdr_tx_req = RESET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET_WM;
+		dbg_q_hdr->qhdr_rx_req = RESET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		break;
+
+	case POLL_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_POLL_MODE_2 |
+			RX_EVENT_POLL_MODE_2;
+		break;
+
+	case WM_MODE:
+		cmd_q_hdr->qhdr_type = Q_CMD | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		cmd_q_hdr->qhdr_rx_wm = SET;
+		cmd_q_hdr->qhdr_tx_wm = SET;
+		cmd_q_hdr->qhdr_rx_req = RESET;
+		cmd_q_hdr->qhdr_tx_req = SET;
+		cmd_q_hdr->qhdr_rx_irq_status = RESET;
+		cmd_q_hdr->qhdr_tx_irq_status = RESET;
+
+		msg_q_hdr->qhdr_type = Q_MSG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		msg_q_hdr->qhdr_rx_wm = SET;
+		msg_q_hdr->qhdr_tx_wm = SET;
+		msg_q_hdr->qhdr_rx_req = SET;
+		msg_q_hdr->qhdr_tx_req = RESET;
+		msg_q_hdr->qhdr_rx_irq_status = RESET;
+		msg_q_hdr->qhdr_tx_irq_status = RESET;
+
+		dbg_q_hdr->qhdr_type = Q_DBG | TX_EVENT_DRIVEN_MODE_2 |
+			RX_EVENT_DRIVEN_MODE_2;
+		dbg_q_hdr->qhdr_rx_wm = SET;
+		dbg_q_hdr->qhdr_tx_wm = SET_WM;
+		dbg_q_hdr->qhdr_rx_req = RESET;
+		dbg_q_hdr->qhdr_tx_req = RESET;
+		dbg_q_hdr->qhdr_rx_irq_status = RESET;
+		dbg_q_hdr->qhdr_tx_irq_status = RESET;
+		break;
+
+	default:
+		CAM_ERR(CAM_HFI, "Invalid event driven mode :%u",
+			event_driven_mode);
+		break;
+	}
+
+	cam_io_w_mb((uint32_t)hfi_mem->qtbl.iova,
+		icp_base + HFI_REG_QTBL_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sfr_buf.iova,
+		icp_base + HFI_REG_SFR_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.iova,
+		icp_base + HFI_REG_SHARED_MEM_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->shmem.len,
+		icp_base + HFI_REG_SHARED_MEM_SIZE);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.iova,
+		icp_base + HFI_REG_UNCACHED_HEAP_PTR);
+	cam_io_w_mb((uint32_t)hfi_mem->sec_heap.len,
+		icp_base + HFI_REG_UNCACHED_HEAP_SIZE);
+	cam_io_w_mb((uint32_t)ICP_INIT_REQUEST_SET,
+		icp_base + HFI_REG_HOST_ICP_INIT_REQUEST);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.iova,
+		icp_base + HFI_REG_QDSS_IOVA);
+	cam_io_w_mb((uint32_t)hfi_mem->qdss.len,
+		icp_base + HFI_REG_QDSS_IOVA_SIZE);
+
+	hw_version = cam_io_r(icp_base + HFI_REG_A5_HW_VERSION);
+
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE),
+			status, (status == ICP_INIT_RESP_SUCCESS), 100, 10000);
+
+		CAM_DBG(CAM_HFI, "1: status = %u rc = %d", status, rc);
+		status = cam_io_r_mb(icp_base + HFI_REG_ICP_HOST_INIT_RESPONSE);
+		CAM_DBG(CAM_HFI, "2: status = %u rc = %d", status, rc);
+		if (status == ICP_INIT_RESP_SUCCESS)
+			break;
+
+		if (status == ICP_INIT_RESP_FAILED) {
+			CAM_ERR(CAM_HFI, "ICP Init Failed. status = %u",
+				status);
+			fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+			CAM_ERR(CAM_HFI, "fw version : [%x]", fw_version);
+			goto regions_fail;
+		}
+		retry_cnt++;
+	}
+
+	if ((retry_cnt == HFI_MAX_POLL_TRY) &&
+		(status == ICP_INIT_RESP_RESET)) {
+		CAM_ERR(CAM_HFI, "Reached Max retries. status = %u",
+				status);
+		fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+		CAM_ERR(CAM_HFI,
+			"hw version: [%x], fw version: [%x]",
+			hw_version, fw_version);
+		goto regions_fail;
+	}
+
+	fw_version = cam_io_r(icp_base + HFI_REG_FW_VERSION);
+	CAM_DBG(CAM_HFI, "hw version : : [%x], fw version : [%x]",
+		hw_version, fw_version);
+
+	g_hfi->csr_base = icp_base;
+	g_hfi->hfi_state = HFI_READY;
+	g_hfi->cmd_q_state = true;
+	g_hfi->msg_q_state = true;
+	cam_io_w_mb((uint32_t)(INTR_ENABLE|INTR_ENABLE_WD0),
+		icp_base + HFI_REG_A5_CSR_A2HOSTINTEN);
+
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+
+	return rc;
+regions_fail:
+	kfree(g_hfi);
+	g_hfi = NULL;
+alloc_fail:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+	return rc;
+}
+
+void cam_hfi_deinit(void __iomem *icp_base)
+{
+	mutex_lock(&hfi_cmd_q_mutex);
+	mutex_lock(&hfi_msg_q_mutex);
+
+	if (!g_hfi) {
+		CAM_ERR(CAM_HFI, "hfi path not established yet");
+		goto err;
+	}
+
+	g_hfi->cmd_q_state = false;
+	g_hfi->msg_q_state = false;
+
+	kzfree(g_hfi);
+	g_hfi = NULL;
+
+err:
+	mutex_unlock(&hfi_cmd_q_mutex);
+	mutex_unlock(&hfi_msg_q_mutex);
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
new file mode 100644
index 0000000..3c63973
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += icp_hw_mgr/ a5_hw/ ipe_hw/ bps_hw/
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
new file mode 100644
index 0000000..bcc5bc2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += a5_dev.o a5_core.o a5_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
new file mode 100644
index 0000000..2aa1df2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.c
@@ -0,0 +1,476 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/elf.h>
+#include <media/cam_icp.h>
+#include "cam_io_util.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "hfi_intf.h"
+#include "hfi_sys_defs.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static int cam_a5_cpas_vote(struct cam_a5_device_core_info *core_info,
+	struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid)
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+
+	if (cpas_vote->axi_vote_valid)
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+
+	if (rc)
+		CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
+
+	return rc;
+}
+
+static int32_t cam_icp_validate_fw(const uint8_t *elf)
+{
+	struct elf32_hdr *elf_hdr;
+
+	if (!elf) {
+		CAM_ERR(CAM_ICP, "Invalid params");
+		return -EINVAL;
+	}
+
+	elf_hdr = (struct elf32_hdr *)elf;
+
+	if (memcmp(elf_hdr->e_ident, ELFMAG, SELFMAG)) {
+		CAM_ERR(CAM_ICP, "ICP elf identifier is failed");
+		return -EINVAL;
+	}
+
+	/* check architecture */
+	if (elf_hdr->e_machine != EM_ARM) {
+		CAM_ERR(CAM_ICP, "unsupported arch");
+		return -EINVAL;
+	}
+
+	/* check elf bit format */
+	if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
+		CAM_ERR(CAM_ICP, "elf doesn't support 32 bit format");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int32_t cam_icp_get_fw_size(const uint8_t *elf, uint32_t *fw_size)
+{
+	int32_t rc = 0;
+	int32_t i = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	uint32_t seg_mem_size = 0;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	if (!elf || !fw_size) {
+		CAM_ERR(CAM_ICP, "invalid args");
+		return -EINVAL;
+	}
+
+	*fw_size = 0;
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	if (!prg_hdr) {
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "num_prg_hdrs = %d", num_prg_hdrs);
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		if (prg_hdr->p_flags == 0)
+			continue;
+
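+		/*
+		 * Round the segment size up to its alignment (p_align is
+		 * assumed to be a power of two), then add the load address
+		 * to get the end of the segment; the firmware buffer must
+		 * cover the highest such end address.
+		 */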
+		seg_mem_size = (prg_hdr->p_memsz + prg_hdr->p_align - 1) &
+					~(prg_hdr->p_align - 1);
+		seg_mem_size += prg_hdr->p_vaddr;
+		CAM_DBG(CAM_ICP, "memsz:%x align:%x addr:%x seg_mem_size:%x",
+			(int)prg_hdr->p_memsz, (int)prg_hdr->p_align,
+			(int)prg_hdr->p_vaddr, (int)seg_mem_size);
+		if (*fw_size < seg_mem_size)
+			*fw_size = seg_mem_size;
+
+	}
+
+	if (*fw_size == 0) {
+		CAM_ERR(CAM_ICP, "invalid elf fw file");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+static int32_t cam_icp_program_fw(const uint8_t *elf,
+		struct cam_a5_device_core_info *core_info)
+{
+	int32_t rc = 0;
+	uint32_t num_prg_hdrs;
+	unsigned char *icp_prg_hdr_tbl;
+	int32_t i = 0;
+	u8 *dest;
+	u8 *src;
+	struct elf32_hdr *elf_hdr;
+	struct elf32_phdr *prg_hdr;
+
+	elf_hdr = (struct elf32_hdr *)elf;
+	num_prg_hdrs = elf_hdr->e_phnum;
+	icp_prg_hdr_tbl = (unsigned char *)elf + elf_hdr->e_phoff;
+	prg_hdr = (struct elf32_phdr *)&icp_prg_hdr_tbl[0];
+
+	if (!prg_hdr) {
+		CAM_ERR(CAM_ICP, "failed to get elf program header attr");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_prg_hdrs; i++, prg_hdr++) {
+		if (prg_hdr->p_flags == 0)
+			continue;
+
+		CAM_DBG(CAM_ICP, "Loading FW header size: %u",
+			prg_hdr->p_filesz);
+		if (prg_hdr->p_filesz != 0) {
+			src = (u8 *)((u8 *)elf + prg_hdr->p_offset);
+			dest = (u8 *)(((u8 *)core_info->fw_kva_addr) +
+				prg_hdr->p_vaddr);
+
+			memcpy_toio(dest, src, prg_hdr->p_filesz);
+		}
+	}
+
+	return rc;
+}
+
+static int32_t cam_a5_download_fw(void *device_priv)
+{
+	int32_t rc = 0;
+	uint32_t fw_size;
+	const uint8_t *fw_start = NULL;
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	struct platform_device         *pdev = NULL;
+	struct a5_soc_info *cam_a5_soc_info = NULL;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+	pdev = soc_info->pdev;
+	cam_a5_soc_info = soc_info->soc_private;
+
+	rc = request_firmware(&core_info->fw_elf, "CAMERA_ICP.elf", &pdev->dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Failed to locate fw: %d", rc);
+		return rc;
+	}
+
+	if (!core_info->fw_elf) {
+		CAM_ERR(CAM_ICP, "Invalid elf size");
+		rc = -EINVAL;
+		goto fw_download_failed;
+	}
+
+	fw_start = core_info->fw_elf->data;
+	rc = cam_icp_validate_fw(fw_start);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "fw elf validation failed");
+		goto fw_download_failed;
+	}
+
+	rc = cam_icp_get_fw_size(fw_start, &fw_size);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to get fw size");
+		goto fw_download_failed;
+	}
+
+	if (core_info->fw_buf_len < fw_size) {
+		CAM_ERR(CAM_ICP, "mismatch in fw size: %u %llu",
+			fw_size, core_info->fw_buf_len);
+		rc = -EINVAL;
+		goto fw_download_failed;
+	}
+
+	rc = cam_icp_program_fw(fw_start, core_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "fw program is failed");
+		goto fw_download_failed;
+	}
+
+fw_download_failed:
+	release_firmware(core_info->fw_elf);
+	return rc;
+}
+
+int cam_a5_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info: %pK core_info: %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
+	cpas_vote.axi_vote.uncompressed_bw = CAM_ICP_A5_BW_BYTES_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_a5_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+int cam_a5_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_a5_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
+
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+irqreturn_t cam_a5_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *a5_dev = data;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	uint32_t irq_status = 0;
+
+	if (!data) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info or query_cap args");
+		return IRQ_HANDLED;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	irq_status = cam_io_r_mb(soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+				core_info->a5_hw_info->a5_host_int_status);
+
+	cam_io_w_mb(irq_status,
+			soc_info->reg_map[A5_SIERRA_BASE].mem_base +
+			core_info->a5_hw_info->a5_host_int_clr);
+
+	if ((irq_status & A5_WDT_0) ||
+		(irq_status & A5_WDT_1)) {
+		CAM_ERR_RATE_LIMIT(CAM_ICP, "watch dog interrupt from A5");
+	}
+
+	spin_lock(&a5_dev->hw_lock);
+	if (core_info->irq_cb.icp_hw_mgr_cb)
+		core_info->irq_cb.icp_hw_mgr_cb(irq_status,
+					core_info->irq_cb.data);
+	spin_unlock(&a5_dev->hw_lock);
+
+	return IRQ_HANDLED;
+}
+
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *a5_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+	struct a5_soc_info *a5_soc = NULL;
+	unsigned long flags;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_A5_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &a5_dev->soc_info;
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+	hw_info = core_info->a5_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_A5_CMD_FW_DOWNLOAD:
+		rc = cam_a5_download_fw(device_priv);
+		break;
+	case CAM_ICP_A5_CMD_SET_FW_BUF: {
+		struct cam_icp_a5_set_fw_buf_info *fw_buf_info = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		core_info->fw_buf = fw_buf_info->iova;
+		core_info->fw_kva_addr = fw_buf_info->kva;
+		core_info->fw_buf_len = fw_buf_info->len;
+
+		CAM_DBG(CAM_ICP, "fw buf info = %x %llx %lld",
+			core_info->fw_buf, core_info->fw_kva_addr,
+			core_info->fw_buf_len);
+		break;
+	}
+	case CAM_ICP_A5_SET_IRQ_CB: {
+		struct cam_icp_a5_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&a5_dev->hw_lock, flags);
+		core_info->irq_cb.icp_hw_mgr_cb = irq_cb->icp_hw_mgr_cb;
+		core_info->irq_cb.data = irq_cb->data;
+		spin_unlock_irqrestore(&a5_dev->hw_lock, flags);
+		break;
+	}
+
+	case CAM_ICP_A5_SEND_INIT:
+		hfi_send_system_cmd(HFI_CMD_SYS_INIT, 0, 0);
+		break;
+
+	case CAM_ICP_A5_CMD_PC_PREP:
+		hfi_send_system_cmd(HFI_CMD_SYS_PC_PREP, 0, 0);
+		break;
+
+	case CAM_ICP_A5_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		cam_a5_cpas_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+					&cpas_vote->ahb_vote,
+					&cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
+		break;
+	}
+
+	case CAM_ICP_A5_CMD_CPAS_STOP:
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
+		break;
+	case CAM_ICP_A5_CMD_UBWC_CFG:
+		a5_soc = soc_info->soc_private;
+		if (!a5_soc) {
+			CAM_ERR(CAM_ICP, "A5 private soc info is NULL");
+			return -EINVAL;
+		}
+		rc = hfi_cmd_ubwc_config(a5_soc->ubwc_cfg);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
new file mode 100644
index 0000000..5e93311
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_core.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_A5_CORE_H
+#define CAM_A5_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include "cam_a5_hw_intf.h"
+
+#define A5_QGIC_BASE            0
+#define A5_SIERRA_BASE          1
+#define A5_CSR_BASE             2
+
+#define A5_HOST_INT             0x1
+#define A5_WDT_0                0x2
+#define A5_WDT_1                0x4
+
+#define ELF_GUARD_PAGE          (2 * 1024 * 1024)
+
+struct cam_a5_device_hw_info {
+	uint32_t hw_ver;
+	uint32_t nsec_reset;
+	uint32_t a5_control;
+	uint32_t a5_host_int_en;
+	uint32_t a5_host_int;
+	uint32_t a5_host_int_clr;
+	uint32_t a5_host_int_status;
+	uint32_t a5_host_int_set;
+	uint32_t host_a5_int;
+	uint32_t fw_version;
+	uint32_t init_req;
+	uint32_t init_response;
+	uint32_t shared_mem_ptr;
+	uint32_t shared_mem_size;
+	uint32_t qtbl_ptr;
+	uint32_t uncached_heap_ptr;
+	uint32_t uncached_heap_size;
+	uint32_t a5_status;
+};
+
+/**
+ * struct cam_a5_device_core_info
+ * @a5_hw_info: A5 hardware info
+ * @fw_elf: start address of fw start with elf header
+ * @fw: start address of fw blob
+ * @fw_buf: smmu alloc/mapped fw buffer
+ * @fw_kva_addr: kernel virtual address of the fw buffer
+ * @fw_buf_len: fw buffer length
+ * @query_cap: A5 query info from firmware
+ * @a5_acquire: Acquire information of A5
+ * @irq_cb: IRQ callback
+ * @cpas_handle: CPAS handle for A5
+ * @cpas_start: state variable for cpas
+ */
+struct cam_a5_device_core_info {
+	struct cam_a5_device_hw_info *a5_hw_info;
+	const struct firmware *fw_elf;
+	void *fw;
+	uint32_t fw_buf;
+	uintptr_t fw_kva_addr;
+	uint64_t fw_buf_len;
+	struct cam_icp_a5_query_cap query_cap;
+	struct cam_icp_a5_acquire_dev a5_acquire[8];
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	uint32_t cpas_handle;
+	bool cpas_start;
+};
+
+int cam_a5_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_a5_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_a5_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_a5_irq(int irq_num, void *data);
+#endif /* CAM_A5_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
new file mode 100644
index 0000000..0554e37
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_dev.c
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "a5_core.h"
+#include "a5_soc.h"
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+struct a5_soc_info cam_a5_soc_info;
+EXPORT_SYMBOL(cam_a5_soc_info);
+
+struct cam_a5_device_hw_info cam_a5_hw_info = {
+	.hw_ver = 0x0,
+	.nsec_reset = 0x4,
+	.a5_control = 0x8,
+	.a5_host_int_en = 0x10,
+	.a5_host_int = 0x14,
+	.a5_host_int_clr = 0x18,
+	.a5_host_int_status = 0x1c,
+	.a5_host_int_set = 0x20,
+	.host_a5_int = 0x30,
+	.fw_version = 0x44,
+	.init_req = 0x48,
+	.init_response = 0x4c,
+	.shared_mem_ptr = 0x50,
+	.shared_mem_size = 0x54,
+	.qtbl_ptr = 0x58,
+	.uncached_heap_ptr = 0x5c,
+	.uncached_heap_size = 0x60,
+	.a5_status = 0x200,
+};
+EXPORT_SYMBOL(cam_a5_hw_info);
+
+static bool cam_a5_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	switch (irq_data->irq_type) {
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+			irq_data->irq_type,
+			irq_data->u.dec_err.decerr_status.value,
+			irq_data->u.dec_err.decerr_status.thr_err,
+			irq_data->u.dec_err.decerr_status.fcl_err,
+			irq_data->u.dec_err.decerr_status.len_md_err,
+			irq_data->u.dec_err.decerr_status.format_err);
+		error_handled = true;
+		break;
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Encode error type=%d status=%x",
+			irq_data->irq_type,
+			irq_data->u.enc_err.encerr_status.value);
+		error_handled = true;
+		break;
+	default:
+		break;
+	}
+
+	return error_handled;
+}
+
+int cam_a5_register_cpas(struct cam_hw_soc_info *soc_info,
+			struct cam_a5_device_core_info *core_info,
+			uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "icp", sizeof("icp"));
+	cpas_register_params.cam_cpas_client_cb = cam_a5_cpas_cb;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed: %d", rc);
+		return rc;
+	}
+
+	core_info->cpas_handle = cpas_register_params.client_handle;
+	return rc;
+}
+
+int cam_a5_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_a5_device_core_info *core_info = NULL;
+	struct cam_a5_device_hw_info *hw_info = NULL;
+
+	a5_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!a5_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &a5_dev_intf->hw_idx);
+
+	a5_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!a5_dev) {
+		rc = -ENOMEM;
+		goto a5_dev_alloc_failure;
+	}
+
+	a5_dev->soc_info.pdev = pdev;
+	a5_dev->soc_info.dev = &pdev->dev;
+	a5_dev->soc_info.dev_name = pdev->name;
+	a5_dev_intf->hw_priv = a5_dev;
+	a5_dev_intf->hw_ops.init = cam_a5_init_hw;
+	a5_dev_intf->hw_ops.deinit = cam_a5_deinit_hw;
+	a5_dev_intf->hw_ops.process_cmd = cam_a5_process_cmd;
+	a5_dev_intf->hw_type = CAM_ICP_DEV_A5;
+
+	CAM_DBG(CAM_ICP, "type %d index %d",
+		a5_dev_intf->hw_type,
+		a5_dev_intf->hw_idx);
+
+	platform_set_drvdata(pdev, a5_dev_intf);
+
+	a5_dev->core_info = kzalloc(sizeof(struct cam_a5_device_core_info),
+					GFP_KERNEL);
+	if (!a5_dev->core_info) {
+		rc = -ENOMEM;
+		goto core_info_alloc_failure;
+	}
+	core_info = (struct cam_a5_device_core_info *)a5_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ICP, "No a5 hardware info");
+		rc = -EINVAL;
+		goto match_err;
+	}
+	hw_info = (struct cam_a5_device_hw_info *)match_dev->data;
+	core_info->a5_hw_info = hw_info;
+
+	a5_dev->soc_info.soc_private = &cam_a5_soc_info;
+
+	rc = cam_a5_init_soc_resources(&a5_dev->soc_info, cam_a5_irq,
+		a5_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed to init_soc");
+		goto init_soc_failure;
+	}
+
+	CAM_DBG(CAM_ICP, "soc info : %pK",
+				(void *)&a5_dev->soc_info);
+	rc = cam_a5_register_cpas(&a5_dev->soc_info,
+			core_info, a5_dev_intf->hw_idx);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "a5 cpas registration failed");
+		goto cpas_reg_failed;
+	}
+	a5_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&a5_dev->hw_mutex);
+	spin_lock_init(&a5_dev->hw_lock);
+	init_completion(&a5_dev->hw_complete);
+
+	CAM_DBG(CAM_ICP, "A5%d probe successful",
+		a5_dev_intf->hw_idx);
+	return 0;
+
+cpas_reg_failed:
+init_soc_failure:
+match_err:
+	kfree(a5_dev->core_info);
+core_info_alloc_failure:
+	kfree(a5_dev);
+a5_dev_alloc_failure:
+	kfree(a5_dev_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_a5_dt_match[] = {
+	{
+		.compatible = "qcom,cam-a5",
+		.data = &cam_a5_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_a5_dt_match);
+
+static struct platform_driver cam_a5_driver = {
+	.probe = cam_a5_probe,
+	.driver = {
+		.name = "cam-a5",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_a5_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_a5_init_module(void)
+{
+	return platform_driver_register(&cam_a5_driver);
+}
+
+static void __exit cam_a5_exit_module(void)
+{
+	platform_driver_unregister(&cam_a5_driver);
+}
+
+module_init(cam_a5_init_module);
+module_exit(cam_a5_exit_module);
+MODULE_DESCRIPTION("CAM A5 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
new file mode 100644
index 0000000..c0aa6ad
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "a5_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+static int cam_a5_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0, i;
+	const char *fw_name;
+	struct a5_soc_info *camp_a5_soc_info;
+	struct device_node *of_node = NULL;
+	struct platform_device *pdev = NULL;
+	int num_ubwc_cfg;
+
+	pdev = soc_info->pdev;
+	of_node = pdev->dev.of_node;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "get a5 dt prop is failed");
+		return rc;
+	}
+
+	camp_a5_soc_info = soc_info->soc_private;
+
+	rc = of_property_read_string(of_node, "fw_name", &fw_name);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "fw_name read failed");
+		goto end;
+	}
+	/* keep the DT provided firmware name in the soc private data */
+	camp_a5_soc_info->fw_name = (char *)fw_name;
+
+	num_ubwc_cfg = of_property_count_u32_elems(of_node, "ubwc-cfg");
+	if ((num_ubwc_cfg < 0) || (num_ubwc_cfg > ICP_UBWC_MAX)) {
+		CAM_ERR(CAM_ICP, "wrong ubwc_cfg: %d", num_ubwc_cfg);
+		rc = (num_ubwc_cfg < 0) ? num_ubwc_cfg : -EINVAL;
+		goto end;
+	}
+
+	for (i = 0; i < num_ubwc_cfg; i++) {
+		rc = of_property_read_u32_index(of_node, "ubwc-cfg",
+			i, &camp_a5_soc_info->ubwc_cfg[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ICP, "unable to read ubwc cfg values");
+			break;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int cam_a5_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, a5_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_a5_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_a5_request_platform_resource(soc_info, a5_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_ICP, "enable platform failed");
+
+	return rc;
+}
+
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_ICP, "disable platform failed");
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
new file mode 100644
index 0000000..560af46
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/a5_soc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_A5_SOC_H
+#define CAM_A5_SOC_H
+
+#include "cam_soc_util.h"
+
+#define ICP_UBWC_MAX 2
+
+struct a5_soc_info {
+	char *fw_name;
+	uint32_t ubwc_cfg[ICP_UBWC_MAX];
+};
+
+int cam_a5_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t a5_irq_handler, void *irq_data);
+
+int cam_a5_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_a5_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
new file mode 100644
index 0000000..8ac5a44
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += bps_dev.o bps_core.o bps_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
new file mode 100644
index 0000000..eb99f6c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "hfi_reg.h"
+
+#define HFI_MAX_POLL_TRY 5
+
+static int cam_bps_cpas_vote(struct cam_bps_device_core_info *core_info,
+			struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid)
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+				&cpas_vote->ahb_vote);
+	if (cpas_vote->axi_vote_valid)
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+				&cpas_vote->axi_vote);
+
+	if (rc < 0)
+		CAM_ERR(CAM_ICP, "cpas vote is failed: %d", rc);
+
+	return rc;
+}
+
+
+int cam_bps_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+			&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_bps_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "soc enable is failed: %d", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	} else {
+		core_info->clk_enable = true;
+	}
+
+	return rc;
+}
+
+int cam_bps_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_bps_disable_soc_resources(soc_info, core_info->clk_enable);
+	if (rc)
+		CAM_ERR(CAM_ICP, "soc disable is failed: %d", rc);
+	core_info->clk_enable = false;
+
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
+static int cam_bps_handle_pc(struct cam_hw_info *bps_dev)
+{
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_bps_device_hw_info *hw_info = NULL;
+	int pwr_ctrl;
+	int pwr_status;
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	hw_info = core_info->bps_hw_info;
+
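+	/*
+	 * If power collapse is not already requested, sample the power
+	 * status, request the collapse through pwr_ctrl and fail if the
+	 * sampled status showed the core still powered on.
+	 */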
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	if (!(pwr_ctrl & BPS_COLLAPSE_MASK)) {
+		cam_cpas_reg_read(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+			true, &pwr_status);
+
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0x1);
+
+		if ((pwr_status >> BPS_PWR_ON_MASK)) {
+			CAM_ERR(CAM_ICP, "BPS: pwr_status(%x):pwr_ctrl(%x)",
+				pwr_status, pwr_ctrl);
+			return -EINVAL;
+		}
+	}
+	cam_bps_get_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true,
+		&pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+		true, &pwr_status);
+	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	return 0;
+}
+
+static int cam_bps_handle_resume(struct cam_hw_info *bps_dev)
+{
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_bps_device_hw_info *hw_info = NULL;
+	int pwr_ctrl;
+	int pwr_status;
+	int rc = 0;
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	hw_info = core_info->bps_hw_info;
+
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+	if (pwr_ctrl & BPS_COLLAPSE_MASK) {
+		CAM_DBG(CAM_ICP, "BPS: pwr_ctrl set(%x)", pwr_ctrl);
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0);
+	}
+
+	rc = cam_bps_transfer_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status, true, &pwr_status);
+	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	return rc;
+}
+
+static int cam_bps_cmd_reset(struct cam_hw_soc_info *soc_info,
+	struct cam_bps_device_core_info *core_info)
+{
+	uint32_t retry_cnt = 0;
+	uint32_t status = 0;
+	int pwr_ctrl, pwr_status, rc = 0;
+	bool reset_bps_cdm_fail = false;
+	bool reset_bps_top_fail = false;
+
+	CAM_DBG(CAM_ICP, "CAM_ICP_BPS_CMD_RESET");
+	/* Reset BPS CDM core */
+	cam_io_w_mb((uint32_t)0xF,
+		soc_info->reg_map[0].mem_base + BPS_CDM_RST_CMD);
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((soc_info->reg_map[0].mem_base +
+			BPS_CDM_IRQ_STATUS),
+			status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+			100, 10000);
+
+		CAM_DBG(CAM_ICP, "bps_cdm_irq_status = %u", status);
+
+		if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+			break;
+		retry_cnt++;
+	}
+	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		BPS_CDM_IRQ_STATUS);
+	if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+		CAM_ERR(CAM_ICP, "BPS CDM rst failed status 0x%x", status);
+		reset_bps_cdm_fail = true;
+	}
+
+	/* Reset BPS core */
+	status = 0;
+	retry_cnt = 0;
+	cam_io_w_mb((uint32_t)0x3,
+		soc_info->reg_map[0].mem_base + BPS_TOP_RST_CMD);
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((soc_info->reg_map[0].mem_base +
+			BPS_TOP_IRQ_STATUS),
+			status, ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+			100, 10000);
+
+		CAM_DBG(CAM_ICP, "bps_top_irq_status = %u", status);
+
+		if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+			break;
+		retry_cnt++;
+	}
+	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		BPS_TOP_IRQ_STATUS);
+	if ((status & BPS_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+		CAM_ERR(CAM_ICP, "BPS top rst failed status 0x%x", status);
+		reset_bps_top_fail = true;
+	}
+
+	cam_bps_get_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, core_info->bps_hw_info->pwr_status,
+		true, &pwr_status);
+	CAM_DBG(CAM_ICP, "(After) pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	if (reset_bps_cdm_fail || reset_bps_top_fail)
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *bps_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_bps_device_core_info *core_info = NULL;
+	struct cam_bps_device_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_BPS_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &bps_dev->soc_info;
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+	hw_info = core_info->bps_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_BPS_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		cam_bps_cpas_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_BPS_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_ICP, "cmd args NULL");
+			return -EINVAL;
+		}
+
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+					&cpas_vote->ahb_vote,
+					&cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
+		break;
+	}
+
+	case CAM_ICP_BPS_CMD_CPAS_STOP:
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
+		break;
+	case CAM_ICP_BPS_CMD_POWER_COLLAPSE:
+		rc = cam_bps_handle_pc(bps_dev);
+		break;
+	case CAM_ICP_BPS_CMD_POWER_RESUME:
+		rc = cam_bps_handle_resume(bps_dev);
+		break;
+	case CAM_ICP_BPS_CMD_UPDATE_CLK: {
+		struct cam_a5_clk_update_cmd *clk_upd_cmd =
+			(struct cam_a5_clk_update_cmd *)cmd_args;
+		uint32_t clk_rate = clk_upd_cmd->curr_clk_rate;
+
+		CAM_DBG(CAM_ICP, "bps_src_clk rate = %d", (int)clk_rate);
+
+		if (!core_info->clk_enable) {
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				cam_bps_handle_pc(bps_dev);
+				cam_cpas_reg_write(core_info->cpas_handle,
+					CAM_CPAS_REG_CPASTOP,
+					hw_info->pwr_ctrl, true, 0x0);
+			}
+			rc = cam_bps_toggle_clk(soc_info, true);
+			if (rc)
+				CAM_ERR(CAM_ICP, "Enable failed");
+			else
+				core_info->clk_enable = true;
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				rc = cam_bps_handle_resume(bps_dev);
+				if (rc)
+					CAM_ERR(CAM_ICP, "BPS resume failed");
+			}
+		}
+		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
+		rc = cam_bps_update_clk_rate(soc_info, clk_rate);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to update clk");
+		}
+		break;
+	case CAM_ICP_BPS_CMD_DISABLE_CLK:
+		if (core_info->clk_enable == true)
+			cam_bps_toggle_clk(soc_info, false);
+		core_info->clk_enable = false;
+		break;
+	case CAM_ICP_BPS_CMD_RESET:
+		rc = cam_bps_cmd_reset(soc_info, core_info);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+irqreturn_t cam_bps_irq(int irq_num, void *data)
+{
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
new file mode 100644
index 0000000..a65ebad
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_BPS_CORE_H
+#define CAM_BPS_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
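+/*
+ * Bit usage below is inferred from bps_core.c: pwr_ctrl bit 0 requests
+ * power collapse, and pwr_status bit 1 indicates the BPS core is powered.
+ */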
+#define BPS_COLLAPSE_MASK 0x1
+#define BPS_PWR_ON_MASK   0x2
+
+struct cam_bps_device_hw_info {
+	uint32_t hw_idx;
+	uint32_t pwr_ctrl;
+	uint32_t pwr_status;
+	uint32_t reserved;
+};
+
+struct cam_bps_device_core_info {
+	struct cam_bps_device_hw_info *bps_hw_info;
+	uint32_t cpas_handle;
+	bool cpas_start;
+	bool clk_enable;
+};
+
+int cam_bps_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_bps_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_bps_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_bps_irq(int irq_num, void *data);
+#endif /* CAM_BPS_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
new file mode 100644
index 0000000..36bd698
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_dev.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "bps_core.h"
+#include "bps_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_bps_device_hw_info cam_bps_hw_info = {
+	.hw_idx = 0,
+	.pwr_ctrl = 0x5c,
+	.pwr_status = 0x58,
+	.reserved = 0,
+};
+
+static char bps_dev_name[8];
+
+static bool cam_bps_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	switch (irq_data->irq_type) {
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_DECODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Decode error type=%d status=%x thr_err=%d, fcl_err=%d, len_md_err=%d, format_err=%d",
+			irq_data->irq_type,
+			irq_data->u.dec_err.decerr_status.value,
+			irq_data->u.dec_err.decerr_status.thr_err,
+			irq_data->u.dec_err.decerr_status.fcl_err,
+			irq_data->u.dec_err.decerr_status.len_md_err,
+			irq_data->u.dec_err.decerr_status.format_err);
+		error_handled = true;
+		break;
+	case CAM_CAMNOC_IRQ_IPE_BPS_UBWC_ENCODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ICP,
+			"IPE/BPS UBWC Encode error type=%d status=%x",
+			irq_data->irq_type,
+			irq_data->u.enc_err.encerr_status.value);
+		error_handled = true;
+		break;
+	default:
+		break;
+	}
+
+	return error_handled;
+}
+
+int cam_bps_register_cpas(struct cam_hw_soc_info *soc_info,
+			struct cam_bps_device_core_info *core_info,
+			uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "bps", sizeof("bps"));
+	cpas_register_params.cam_cpas_client_cb = cam_bps_cpas_cb;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+int cam_bps_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info            *bps_dev = NULL;
+	struct cam_hw_intf            *bps_dev_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_bps_device_core_info   *core_info = NULL;
+	struct cam_bps_device_hw_info     *hw_info = NULL;
+	int                                rc = 0;
+
+	bps_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!bps_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &bps_dev_intf->hw_idx);
+
+	bps_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!bps_dev) {
+		kfree(bps_dev_intf);
+		return -ENOMEM;
+	}
+
+	memset(bps_dev_name, 0, sizeof(bps_dev_name));
+	snprintf(bps_dev_name, sizeof(bps_dev_name),
+		"bps%1u", bps_dev_intf->hw_idx);
+
+	bps_dev->soc_info.pdev = pdev;
+	bps_dev->soc_info.dev = &pdev->dev;
+	bps_dev->soc_info.dev_name = bps_dev_name;
+	bps_dev_intf->hw_priv = bps_dev;
+	bps_dev_intf->hw_ops.init = cam_bps_init_hw;
+	bps_dev_intf->hw_ops.deinit = cam_bps_deinit_hw;
+	bps_dev_intf->hw_ops.process_cmd = cam_bps_process_cmd;
+	bps_dev_intf->hw_type = CAM_ICP_DEV_BPS;
+	platform_set_drvdata(pdev, bps_dev_intf);
+	bps_dev->core_info = kzalloc(sizeof(struct cam_bps_device_core_info),
+					GFP_KERNEL);
+	if (!bps_dev->core_info) {
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return -ENOMEM;
+	}
+	core_info = (struct cam_bps_device_core_info *)bps_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ICP, "No bps hardware info");
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return -EINVAL;
+	}
+	hw_info = &cam_bps_hw_info;
+	core_info->bps_hw_info = hw_info;
+
+	rc = cam_bps_init_soc_resources(&bps_dev->soc_info, cam_bps_irq,
+		bps_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed to init_soc");
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return rc;
+	}
+	CAM_DBG(CAM_ICP, "soc info : %pK",
+		(void *)&bps_dev->soc_info);
+
+	rc = cam_bps_register_cpas(&bps_dev->soc_info,
+			core_info, bps_dev_intf->hw_idx);
+	if (rc < 0) {
+		kfree(bps_dev->core_info);
+		kfree(bps_dev);
+		kfree(bps_dev_intf);
+		return rc;
+	}
+	bps_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&bps_dev->hw_mutex);
+	spin_lock_init(&bps_dev->hw_lock);
+	init_completion(&bps_dev->hw_complete);
+	CAM_DBG(CAM_ICP, "BPS%d probe successful",
+		bps_dev_intf->hw_idx);
+
+	return rc;
+}
+
+static const struct of_device_id cam_bps_dt_match[] = {
+	{
+		.compatible = "qcom,cam-bps",
+		.data = &cam_bps_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_bps_dt_match);
+
+static struct platform_driver cam_bps_driver = {
+	.probe = cam_bps_probe,
+	.driver = {
+		.name = "cam-bps",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_bps_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_bps_init_module(void)
+{
+	return platform_driver_register(&cam_bps_driver);
+}
+
+static void __exit cam_bps_exit_module(void)
+{
+	platform_driver_unregister(&cam_bps_driver);
+}
+
+module_init(cam_bps_init_module);
+module_exit(cam_bps_exit_module);
+MODULE_DESCRIPTION("CAM BPS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
new file mode 100644
index 0000000..cd525e1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.c
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "bps_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+static int cam_bps_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ICP, "failed to get bps dt properties");
+
+	return rc;
+}
+
+static int cam_bps_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, bps_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_bps_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_bps_request_platform_resource(soc_info, bps_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, false);
+	if (rc)
+		CAM_ERR(CAM_ICP, "enable platform failed");
+
+	return rc;
+}
+
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+		false);
+	if (rc)
+		CAM_ERR(CAM_ICP, "disable platform failed");
+
+	return rc;
+}
+
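+/*
+ * GDSC handoff helpers: cam_bps_transfer_gdsc_control() moves every BPS
+ * regulator to FAST mode (used on power resume), while
+ * cam_bps_get_gdsc_control() moves them back to NORMAL (used on power
+ * collapse and reset). On failure, regulators that were already switched
+ * are rolled back to their previous mode.
+ */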
+int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_set_mode(soc_info->rgltr[i],
+			REGULATOR_MODE_FAST);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+				soc_info->rgltr_name[i]);
+			goto rgltr_set_mode_failed;
+		}
+	}
+	return 0;
+
+rgltr_set_mode_failed:
+	for (i = i - 1; i >= 0; i--)
+		if (soc_info->rgltr[i])
+			regulator_set_mode(soc_info->rgltr[i],
+					REGULATOR_MODE_NORMAL);
+
+	return rc;
+}
+
+int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_set_mode(soc_info->rgltr[i],
+			REGULATOR_MODE_NORMAL);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+				soc_info->rgltr_name[i]);
+			goto rgltr_set_mode_failed;
+		}
+	}
+	return 0;
+
+rgltr_set_mode_failed:
+	for (i = i - 1; i >= 0; i--)
+		if (soc_info->rgltr[i])
+			regulator_set_mode(soc_info->rgltr[i],
+					REGULATOR_MODE_FAST);
+
+	return rc;
+}
+
+int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate)
+{
+	int32_t src_clk_idx;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+		CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+			clk_rate,
+			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+	}
+
+	return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+}
+
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+	int rc = 0;
+
+	if (clk_enable)
+		rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+	else
+		cam_soc_util_clk_disable_default(soc_info);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
new file mode 100644
index 0000000..5029097
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/bps_hw/bps_soc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_BPS_SOC_H_
+#define _CAM_BPS_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_bps_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t bps_irq_handler, void *irq_data);
+
+int cam_bps_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk);
+
+int cam_bps_get_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_bps_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate);
+int cam_bps_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
+#endif /* _CAM_BPS_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
new file mode 100644
index 0000000..272afc1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/isp/isp_hw/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/a5_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_icp_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
new file mode 100644
index 0000000..5f5738d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.c
@@ -0,0 +1,4708 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/timer.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include <media/cam_cpas.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_icp_hw_mgr.h"
+#include "cam_a5_hw_intf.h"
+#include "cam_bps_hw_intf.h"
+#include "cam_ipe_hw_intf.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "hfi_intf.h"
+#include "hfi_reg.h"
+#include "hfi_session_defs.h"
+#include "hfi_sys_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "a5_core.h"
+#include "hfi_sys_defs.h"
+#include "cam_debug_util.h"
+#include "cam_soc_util.h"
+#include "cam_trace.h"
+#include "cam_cpas_api.h"
+#include "cam_common_util.h"
+
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+#define ICP_DEV_TYPE_TO_CLK_TYPE(dev_type) \
+	(((dev_type) == CAM_ICP_RES_TYPE_BPS) ? ICP_CLK_HW_BPS : ICP_CLK_HW_IPE)
+
+#define ICP_DEVICE_IDLE_TIMEOUT 400
+
+static struct cam_icp_hw_mgr icp_hw_mgr;
+
+static int cam_icp_send_ubwc_cfg(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	int rc;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is NULL");
+		return -EINVAL;
+	}
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_UBWC_CFG, NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ICP, "CAM_ICP_A5_CMD_UBWC_CFG failed");
+
+	return rc;
+}
+
+static void cam_icp_hw_mgr_clk_info_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct cam_icp_clk_info *hw_mgr_clk_info;
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+	else
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+
+	if (hw_mgr_clk_info->base_clk >= ctx_data->clk_info.base_clk)
+		hw_mgr_clk_info->base_clk -= ctx_data->clk_info.base_clk;
+}
+
+static void cam_icp_hw_mgr_reset_clk_info(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int i;
+
+	for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+		hw_mgr->clk_info[i].base_clk = 0;
+		hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
+		hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
+		hw_mgr->clk_info[i].over_clked = 0;
+		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	}
+	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
+}
+
+static int cam_icp_get_actual_clk_rate_idx(
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++)
+		if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+			return i;
+
+	/*
+	 * Caller has to ensure returned index is within array
+	 * size bounds while accessing that index.
+	 */
+
+	return i;
+}
+
+static bool cam_icp_is_over_clk(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+	int base_clk_idx;
+	int curr_clk_idx;
+
+	base_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
+		hw_mgr_clk_info->base_clk);
+
+	curr_clk_idx = cam_icp_get_actual_clk_rate_idx(ctx_data,
+		hw_mgr_clk_info->curr_clk);
+
+	CAM_DBG(CAM_ICP, "bc_idx = %d cc_idx = %d %d %d",
+		base_clk_idx, curr_clk_idx, hw_mgr_clk_info->base_clk,
+		hw_mgr_clk_info->curr_clk);
+
+	if (curr_clk_idx > base_clk_idx)
+		return true;
+
+	return false;
+}
+
+static int cam_icp_get_lower_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+	if (i > 0)
+		return ctx_data->clk_info.clk_rate[i - 1];
+
+	CAM_DBG(CAM_ICP, "Already clk at lower level");
+	return base_clk;
+}
+
+static int cam_icp_get_next_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	i = cam_icp_get_actual_clk_rate_idx(ctx_data, base_clk);
+
+	if (i < CAM_MAX_VOTE - 1)
+		return ctx_data->clk_info.clk_rate[i + 1];
+
+	CAM_DBG(CAM_ICP, "Already clk at higher level");
+
+	return base_clk;
+}
+
+static int cam_icp_get_actual_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, uint32_t base_clk)
+{
+	int i;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++)
+		if (ctx_data->clk_info.clk_rate[i] >= base_clk)
+			return ctx_data->clk_info.clk_rate[i];
+
+	return base_clk;
+}
+
+static int cam_icp_supported_clk_rates(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_hw_info *dev = NULL;
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		dev_intf = hw_mgr->bps_dev_intf;
+	else
+		dev_intf = hw_mgr->ipe0_dev_intf;
+
+	if (!dev_intf) {
+		CAM_ERR(CAM_ICP, "dev_intf is invalid");
+		return -EINVAL;
+	}
+	dev = (struct cam_hw_info *)dev_intf->hw_priv;
+	soc_info = &dev->soc_info;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		ctx_data->clk_info.clk_rate[i] =
+			soc_info->clk_rate[i][soc_info->src_clk_idx];
+		CAM_DBG(CAM_ICP, "clk_info[%d] = %d",
+			i, ctx_data->clk_info.clk_rate[i]);
+	}
+
+	return 0;
+}
+
+static int cam_icp_clk_idx_from_req_id(struct cam_icp_hw_ctx_data *ctx_data,
+	uint64_t req_id)
+{
+	struct hfi_frame_process_info *frame_process;
+	int i;
+
+	frame_process = &ctx_data->hfi_frame_process;
+
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		if (frame_process->request_id[i] == req_id)
+			return i;
+
+	return 0;
+}
+
+static int cam_icp_ctx_clk_info_init(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+	ctx_data->clk_info.uncompressed_bw = 0;
+	ctx_data->clk_info.compressed_bw = 0;
+	cam_icp_supported_clk_rates(&icp_hw_mgr, ctx_data);
+
+	return 0;
+}
+
+static bool cam_icp_frame_pending(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	return !bitmap_empty(ctx_data->hfi_frame_process.bitmap,
+			CAM_FRAME_CMD_MAX);
+}
+
+static int cam_icp_ctx_timer_reset(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	if (ctx_data && ctx_data->watch_dog) {
+		ctx_data->watch_dog_reset_counter++;
+		CAM_DBG(CAM_ICP, "reset timer : ctx_id = %d, counter=%d",
+			ctx_data->ctx_id, ctx_data->watch_dog_reset_counter);
+		crm_timer_reset(ctx_data->watch_dog);
+	}
+
+	return 0;
+}
+
+static void cam_icp_device_timer_reset(struct cam_icp_hw_mgr *hw_mgr,
+	int device_index)
+{
+	if ((device_index >= ICP_CLK_HW_MAX) || (!hw_mgr))
+		return;
+
+	if (hw_mgr->clk_info[device_index].watch_dog) {
+		CAM_DBG(CAM_ICP, "reset timer : device_index = %d",
+			device_index);
+		crm_timer_reset(hw_mgr->clk_info[device_index].watch_dog);
+		hw_mgr->clk_info[device_index].watch_dog_reset_counter++;
+	}
+}
+
+static int32_t cam_icp_deinit_idle_clk(void *priv, void *data)
+{
+	struct cam_icp_hw_mgr *hw_mgr = (struct cam_icp_hw_mgr *)priv;
+	struct clk_work_data *task_data = (struct clk_work_data *)data;
+	struct cam_icp_clk_info *clk_info =
+		(struct cam_icp_clk_info *)task_data->data;
+	uint32_t id;
+	uint32_t i;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_a5_clk_update_cmd clk_upd_cmd;
+	int rc = 0;
+	bool busy = false;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	clk_info->base_clk = 0;
+	clk_info->curr_clk = 0;
+	clk_info->over_clked = 0;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
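+	/*
+	 * If any acquired context of this clock type still has frames
+	 * pending, the device is not really idle: rearm the device timer
+	 * below and leave the clocks untouched.
+	 */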
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx_data[i];
+		mutex_lock(&ctx_data->ctx_mutex);
+		if ((ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED) &&
+			(ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type)
+			== clk_info->hw_type)) {
+			busy = cam_icp_frame_pending(ctx_data);
+			if (busy) {
+				mutex_unlock(&ctx_data->ctx_mutex);
+				break;
+			}
+			cam_icp_ctx_clk_info_init(ctx_data);
+		}
+		mutex_unlock(&ctx_data->ctx_mutex);
+	}
+
+	if (busy) {
+		cam_icp_device_timer_reset(hw_mgr, clk_info->hw_type);
+		rc = -EBUSY;
+		goto done;
+	}
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	if (clk_info->hw_type == ICP_CLK_HW_BPS) {
+		dev_intf = bps_dev_intf;
+		id = CAM_ICP_BPS_CMD_DISABLE_CLK;
+	} else if (clk_info->hw_type == ICP_CLK_HW_IPE) {
+		dev_intf = ipe0_dev_intf;
+		id = CAM_ICP_IPE_CMD_DISABLE_CLK;
+	} else {
+		CAM_ERR(CAM_ICP, "Invalid clk hw type %d", clk_info->hw_type);
+		rc = -EINVAL;
+		goto done;
+	}
+
+	CAM_DBG(CAM_ICP, "Disable %d", clk_info->hw_type);
+
+	clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
+
+	if (clk_info->hw_type != ICP_CLK_HW_BPS)
+		if (ipe1_dev_intf)
+			ipe1_dev_intf->hw_ops.process_cmd(
+				ipe1_dev_intf->hw_priv, id,
+				&clk_upd_cmd,
+				sizeof(struct cam_a5_clk_update_cmd));
+
+done:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int32_t cam_icp_ctx_timer(void *priv, void *data)
+{
+	struct clk_work_data *task_data = (struct clk_work_data *)data;
+	struct cam_icp_hw_ctx_data *ctx_data =
+		(struct cam_icp_hw_ctx_data *)task_data->data;
+	struct cam_icp_hw_mgr *hw_mgr = &icp_hw_mgr;
+	uint32_t id;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_icp_clk_info *clk_info;
+	struct cam_icp_cpas_vote clk_update;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "ctx_data is NULL, failed to update clk");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	if ((ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) ||
+		(ctx_data->watch_dog_reset_counter == 0)) {
+		CAM_DBG(CAM_ICP, "state %d, counter=%d",
+			ctx_data->state, ctx_data->watch_dog_reset_counter);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return 0;
+	}
+
+	if (cam_icp_frame_pending(ctx_data)) {
+		cam_icp_ctx_timer_reset(ctx_data);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EBUSY;
+	}
+
+	CAM_DBG(CAM_ICP,
+		"E :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+		ctx_data->ctx_id,
+		ctx_data->clk_info.uncompressed_bw,
+		ctx_data->clk_info.compressed_bw,
+		ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	if (!ctx_data->icp_dev_acquire_info) {
+		CAM_WARN(CAM_ICP, "NULL acquire info");
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		dev_intf = bps_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+		id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+	} else {
+		dev_intf = ipe0_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+	}
+
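+	/*
+	 * The context has idled past its watchdog timeout: drop its
+	 * bandwidth contribution from the aggregate and re-vote AXI so the
+	 * bus vote can scale down.
+	 */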
+	clk_info->compressed_bw -= ctx_data->clk_info.compressed_bw;
+	clk_info->uncompressed_bw -= ctx_data->clk_info.uncompressed_bw;
+	ctx_data->clk_info.uncompressed_bw = 0;
+	ctx_data->clk_info.compressed_bw = 0;
+	ctx_data->clk_info.curr_fc = 0;
+	ctx_data->clk_info.base_clk = 0;
+
+	clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+	clk_update.ahb_vote.vote.freq = 0;
+	clk_update.ahb_vote_valid = false;
+	clk_update.axi_vote.compressed_bw = clk_info->compressed_bw;
+	clk_update.axi_vote.uncompressed_bw = clk_info->uncompressed_bw;
+	clk_update.axi_vote_valid = true;
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_update, sizeof(clk_update));
+
+	CAM_DBG(CAM_ICP,
+		"X :ctx_id = %d ubw = %lld cbw = %lld curr_fc = %u bc = %u",
+		ctx_data->ctx_id,
+		ctx_data->clk_info.uncompressed_bw,
+		ctx_data->clk_info.compressed_bw,
+		ctx_data->clk_info.curr_fc, ctx_data->clk_info.base_clk);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return 0;
+}
+
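+/*
+ * The watchdog callbacks below run in atomic (timer) context, so they only
+ * fetch a workq task under the hw_mgr spinlock and defer the real clock and
+ * bandwidth rework to cam_icp_ctx_timer()/cam_icp_deinit_idle_clk() in
+ * process context.
+ */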
+static void cam_icp_ctx_timer_cb(struct timer_list *timer_data)
+{
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct clk_work_data *task_data;
+	struct cam_req_mgr_timer *timer =
+		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
+
+	spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.timer_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+		return;
+	}
+
+	task_data = (struct clk_work_data *)task->payload;
+	task_data->data = timer->parent;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_ctx_timer;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static void cam_icp_device_timer_cb(struct timer_list *timer_data)
+{
+	unsigned long flags;
+	struct crm_workq_task *task;
+	struct clk_work_data *task_data;
+	struct cam_req_mgr_timer *timer =
+		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
+
+	spin_lock_irqsave(&icp_hw_mgr.hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.timer_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+		return;
+	}
+
+	task_data = (struct clk_work_data *)task->payload;
+	task_data->data = timer->parent;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_deinit_idle_clk;
+	cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&icp_hw_mgr.hw_mgr_lock, flags);
+}
+
+static int cam_icp_clk_info_init(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+
+	for (i = 0; i < ICP_CLK_HW_MAX; i++) {
+		hw_mgr->clk_info[i].base_clk = ICP_CLK_SVS_HZ;
+		hw_mgr->clk_info[i].curr_clk = ICP_CLK_SVS_HZ;
+		hw_mgr->clk_info[i].threshold = ICP_OVER_CLK_THRESHOLD;
+		hw_mgr->clk_info[i].over_clked = 0;
+		hw_mgr->clk_info[i].uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		hw_mgr->clk_info[i].compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		hw_mgr->clk_info[i].hw_type = i;
+		hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
+	}
+	hw_mgr->icp_default_clk = ICP_CLK_SVS_HZ;
+
+	return 0;
+}
+
+static int cam_icp_ctx_timer_start(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+
+	rc = crm_timer_init(&ctx_data->watch_dog,
+		200, ctx_data, &cam_icp_ctx_timer_cb);
+	if (rc)
+		CAM_ERR(CAM_ICP, "Failed to start timer");
+
+	ctx_data->watch_dog_reset_counter = 0;
+
+	CAM_DBG(CAM_ICP, "start timer : ctx_id = %d", ctx_data->ctx_id);
+	return rc;
+}
+
+static int cam_icp_device_timer_start(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < ICP_CLK_HW_MAX; i++)  {
+		if (!hw_mgr->clk_info[i].watch_dog) {
+			rc = crm_timer_init(&hw_mgr->clk_info[i].watch_dog,
+				ICP_DEVICE_IDLE_TIMEOUT, &hw_mgr->clk_info[i],
+				&cam_icp_device_timer_cb);
+
+			if (rc)
+				CAM_ERR(CAM_ICP, "Failed to start timer %d", i);
+
+			hw_mgr->clk_info[i].watch_dog_reset_counter = 0;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_icp_ctx_timer_stop(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	if (ctx_data->watch_dog) {
+		CAM_DBG(CAM_ICP, "stop timer : ctx_id = %d", ctx_data->ctx_id);
+		ctx_data->watch_dog_reset_counter = 0;
+		crm_timer_exit(&ctx_data->watch_dog);
+		ctx_data->watch_dog = NULL;
+	}
+
+	return 0;
+}
+
+static void cam_icp_device_timer_stop(struct cam_icp_hw_mgr *hw_mgr)
+{
+	if (!hw_mgr->bps_ctxt_cnt &&
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog) {
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog_reset_counter = 0;
+		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog);
+		hw_mgr->clk_info[ICP_CLK_HW_BPS].watch_dog = NULL;
+	}
+
+	if (!hw_mgr->ipe_ctxt_cnt &&
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog) {
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog_reset_counter = 0;
+		crm_timer_exit(&hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog);
+		hw_mgr->clk_info[ICP_CLK_HW_IPE].watch_dog = NULL;
+	}
+}
+
+static uint32_t cam_icp_mgr_calc_base_clk(uint32_t frame_cycles,
+	uint64_t budget)
+{
+	uint64_t base_clk;
+	uint64_t mul = 1000000000;
+
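+	/*
+	 * base_clk = frame_cycles / budget-in-seconds. For example (numbers
+	 * purely illustrative): 6000000 cycles against a 33 ms (33000000 ns)
+	 * budget works out to 6000000 * 10^9 / 33000000 ~= 182 MHz.
+	 */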
+	base_clk = (frame_cycles * mul) / budget;
+
+	CAM_DBG(CAM_ICP, "budget = %lld fc = %d ib = %lld base_clk = %lld",
+		budget, frame_cycles,
+		(long long)(frame_cycles * mul), base_clk);
+
+	return base_clk;
+}
+
+static bool cam_icp_busy_prev_reqs(struct hfi_frame_process_info *frm_process,
+	uint64_t req_id)
+{
+	int i;
+	int cnt;
+
+	for (i = 0, cnt = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (frm_process->request_id[i]) {
+			if (frm_process->fw_process_flag[i]) {
+				CAM_DBG(CAM_ICP, "r id = %lld busy = %d",
+					frm_process->request_id[i],
+					frm_process->fw_process_flag[i]);
+				cnt++;
+			}
+		}
+	}
+	if (cnt > 1)
+		return true;
+
+	return false;
+}
+
+static int cam_icp_calc_total_clk(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_clk_info *hw_mgr_clk_info, uint32_t dev_type)
+{
+	int i;
+	struct cam_icp_hw_ctx_data *ctx_data;
+
+	hw_mgr_clk_info->base_clk = 0;
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx_data = &hw_mgr->ctx_data[i];
+		if (ctx_data->state == CAM_ICP_CTX_STATE_ACQUIRED &&
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type) ==
+			ICP_DEV_TYPE_TO_CLK_TYPE(dev_type))
+			hw_mgr_clk_info->base_clk +=
+				ctx_data->clk_info.base_clk;
+	}
+
+	return 0;
+}
+
+static bool cam_icp_update_clk_busy(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	uint32_t next_clk_level;
+	uint32_t actual_clk;
+	bool rc = false;
+
+	/*
+	 * 1. If the current request's frame cycles (fc) exceed the previous
+	 *    frame's fc, recalculate the base clock:
+	 *    - if the sum of base clocks is more than the next available
+	 *      clock level, update curr_clk to that sum and clear the
+	 *      over-clock count;
+	 *    - otherwise bump curr_clk to the next level and clear the
+	 *      over-clock count.
+	 * 2. If the current fc is less than or equal to the previous frame's
+	 *    fc, still bump the clock to the next available level (clearing
+	 *    the over-clock count); if the clock is already at the highest
+	 *    rate, no update is needed.
+	 */
+	ctx_data->clk_info.base_clk = base_clk;
+	hw_mgr_clk_info->over_clked = 0;
+	if (clk_info->frame_cycles > ctx_data->clk_info.curr_fc) {
+		cam_icp_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+			ctx_data->icp_dev_acquire_info->dev_type);
+		actual_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+			ctx_data, base_clk);
+		if (hw_mgr_clk_info->base_clk > actual_clk) {
+			hw_mgr_clk_info->curr_clk = hw_mgr_clk_info->base_clk;
+		} else {
+			next_clk_level = cam_icp_get_next_clk_rate(hw_mgr,
+				ctx_data, hw_mgr_clk_info->curr_clk);
+			hw_mgr_clk_info->curr_clk = next_clk_level;
+		}
+		rc = true;
+	} else {
+		next_clk_level =
+			cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
+			hw_mgr_clk_info->curr_clk);
+		if (hw_mgr_clk_info->curr_clk < next_clk_level) {
+			hw_mgr_clk_info->curr_clk = next_clk_level;
+			rc = true;
+		}
+	}
+	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+
+	return rc;
+}
+
+static bool cam_icp_update_clk_overclk_free(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	bool rc = false;
+
+	/*
+	 * When there are no pending packets:
+	 *    1. If the over-clock count is below the threshold, increment it
+	 *       and leave the clock rate unchanged.
+	 *    2. Otherwise lower the clock rate by one level and update the
+	 *       hw_mgr's current clock value:
+	 *        a. if the new rate is still above the sum of base clocks,
+	 *           reset the over-clock count only if it is still
+	 *           over-clocked;
+	 *        b. if it dropped below the sum of base clocks, step back up
+	 *           one level and reset the over-clock count;
+	 *        c. if it equals the sum of base clock rates, reset the
+	 *           over-clock count.
+	 */
+	if (hw_mgr_clk_info->over_clked < hw_mgr_clk_info->threshold) {
+		hw_mgr_clk_info->over_clked++;
+		rc = false;
+	} else {
+		hw_mgr_clk_info->curr_clk =
+			cam_icp_get_lower_clk_rate(hw_mgr, ctx_data,
+			hw_mgr_clk_info->curr_clk);
+		if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+			if (cam_icp_is_over_clk(hw_mgr, ctx_data,
+				hw_mgr_clk_info))
+				hw_mgr_clk_info->over_clked = 0;
+		} else if (hw_mgr_clk_info->curr_clk <
+			hw_mgr_clk_info->base_clk) {
+			hw_mgr_clk_info->curr_clk =
+				cam_icp_get_next_clk_rate(hw_mgr, ctx_data,
+				hw_mgr_clk_info->curr_clk);
+			hw_mgr_clk_info->over_clked = 0;
+		} else if (hw_mgr_clk_info->curr_clk ==
+			hw_mgr_clk_info->base_clk) {
+			hw_mgr_clk_info->over_clked = 0;
+		}
+		rc = true;
+	}
+
+	return rc;
+}
+
+static bool cam_icp_update_clk_free(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	uint32_t base_clk)
+{
+	bool rc = false;
+	bool over_clocked = false;
+
+	ctx_data->clk_info.curr_fc = clk_info->frame_cycles;
+	ctx_data->clk_info.base_clk = base_clk;
+	cam_icp_calc_total_clk(hw_mgr, hw_mgr_clk_info,
+		ctx_data->icp_dev_acquire_info->dev_type);
+
+	/*
+	 * The current clock is not always the sum of the base clocks: since
+	 * clock scaling snaps to the next higher or lower level, it always
+	 * equals one of the discrete rates supported by the hardware. So a
+	 * current clock above the sum of base clocks is not necessarily
+	 * over-clocked; it only counts as over-clocked if it sits at a
+	 * higher discrete level than the base clock requires.
+	 * 1. Handle the over-clock case.
+	 * 2. If the current clock is less than the sum of base clocks,
+	 *    update the current clock.
+	 * 3. If the current clock equals the sum of base clocks, no action.
+	 */
+
+	over_clocked = cam_icp_is_over_clk(hw_mgr, ctx_data,
+		hw_mgr_clk_info);
+
+	if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk &&
+		over_clocked) {
+		rc = cam_icp_update_clk_overclk_free(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+	} else if (hw_mgr_clk_info->curr_clk > hw_mgr_clk_info->base_clk) {
+		hw_mgr_clk_info->over_clked = 0;
+		rc = false;
+	} else if (hw_mgr_clk_info->curr_clk < hw_mgr_clk_info->base_clk) {
+		hw_mgr_clk_info->curr_clk = cam_icp_get_actual_clk_rate(hw_mgr,
+			ctx_data, hw_mgr_clk_info->base_clk);
+		rc = true;
+	}
+
+	return rc;
+}
+
+static bool cam_icp_debug_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+	if (icp_hw_mgr.icp_debug_clk < ICP_CLK_TURBO_HZ &&
+		icp_hw_mgr.icp_debug_clk &&
+		icp_hw_mgr.icp_debug_clk != hw_mgr_clk_info->curr_clk) {
+		hw_mgr_clk_info->base_clk = icp_hw_mgr.icp_debug_clk;
+		hw_mgr_clk_info->curr_clk = icp_hw_mgr.icp_debug_clk;
+		hw_mgr_clk_info->uncompressed_bw = icp_hw_mgr.icp_debug_clk;
+		hw_mgr_clk_info->compressed_bw = icp_hw_mgr.icp_debug_clk;
+		CAM_DBG(CAM_ICP, "bc = %d cc = %d",
+			hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk);
+		return true;
+	}
+
+	return false;
+}
+
+static bool cam_icp_default_clk_update(struct cam_icp_clk_info *hw_mgr_clk_info)
+{
+	if (icp_hw_mgr.icp_default_clk != hw_mgr_clk_info->curr_clk) {
+		hw_mgr_clk_info->base_clk = icp_hw_mgr.icp_default_clk;
+		hw_mgr_clk_info->curr_clk = icp_hw_mgr.icp_default_clk;
+		hw_mgr_clk_info->uncompressed_bw = icp_hw_mgr.icp_default_clk;
+		hw_mgr_clk_info->compressed_bw = icp_hw_mgr.icp_default_clk;
+		CAM_DBG(CAM_ICP, "bc = %d cc = %d",
+			hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk);
+		return true;
+	}
+
+	return false;
+}
+
+static bool cam_icp_update_bw(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_icp_clk_info *hw_mgr_clk_info,
+	struct cam_icp_clk_bw_request *clk_info,
+	bool busy)
+{
+	int i;
+	struct cam_icp_hw_ctx_data *ctx;
+
+	/*
+	 * If current request bandwidth is different from previous frames, then
+	 * recalculate bandwidth of all contexts of same hardware and update
+	 * voting of bandwidth
+	 */
+	CAM_DBG(CAM_ICP, "ubw ctx = %lld clk_info ubw = %lld busy = %d",
+		ctx_data->clk_info.uncompressed_bw,
+		clk_info->uncompressed_bw, busy);
+
+	if ((clk_info->uncompressed_bw == ctx_data->clk_info.uncompressed_bw) &&
+		(ctx_data->clk_info.uncompressed_bw ==
+		hw_mgr_clk_info->uncompressed_bw))
+		return false;
+
+	if (busy &&
+		ctx_data->clk_info.uncompressed_bw > clk_info->uncompressed_bw)
+		return false;
+
+	ctx_data->clk_info.uncompressed_bw = clk_info->uncompressed_bw;
+	ctx_data->clk_info.compressed_bw = clk_info->compressed_bw;
+	hw_mgr_clk_info->uncompressed_bw = 0;
+	hw_mgr_clk_info->compressed_bw = 0;
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		ctx = &hw_mgr->ctx_data[i];
+		if (ctx->state == CAM_ICP_CTX_STATE_ACQUIRED &&
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx->icp_dev_acquire_info->dev_type) ==
+			ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type)) {
+			hw_mgr_clk_info->uncompressed_bw +=
+				ctx->clk_info.uncompressed_bw;
+			hw_mgr_clk_info->compressed_bw +=
+				ctx->clk_info.compressed_bw;
+			CAM_DBG(CAM_ICP, "ubw = %lld, cbw = %lld",
+				hw_mgr_clk_info->uncompressed_bw,
+				hw_mgr_clk_info->compressed_bw);
+		}
+	}
+
+	return true;
+}
+
+static bool cam_icp_check_clk_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+	bool busy, rc = false;
+	uint32_t base_clk;
+	struct cam_icp_clk_bw_request *clk_info;
+	struct hfi_frame_process_info *frame_info;
+	uint64_t req_id;
+	struct cam_icp_clk_info *hw_mgr_clk_info;
+
+	cam_icp_ctx_timer_reset(ctx_data);
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_BPS);
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+		CAM_DBG(CAM_ICP, "Reset bps timer");
+	} else {
+		cam_icp_device_timer_reset(hw_mgr, ICP_CLK_HW_IPE);
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		CAM_DBG(CAM_ICP, "Reset ipe timer");
+	}
+
+	if (icp_hw_mgr.icp_debug_clk)
+		return cam_icp_debug_clk_update(hw_mgr_clk_info);
+
+	/* Check if there are any pending frames in this context */
+	frame_info = &ctx_data->hfi_frame_process;
+	req_id = frame_info->request_id[idx];
+	busy = cam_icp_busy_prev_reqs(frame_info, req_id);
+	CAM_DBG(CAM_ICP, "busy = %d req_id = %lld", busy, req_id);
+
+	clk_info = &ctx_data->hfi_frame_process.clk_info[idx];
+	if (!clk_info->frame_cycles)
+		return cam_icp_default_clk_update(hw_mgr_clk_info);
+
+	/* Calculate base clk rate */
+	base_clk = cam_icp_mgr_calc_base_clk(
+		clk_info->frame_cycles, clk_info->budget_ns);
+	ctx_data->clk_info.rt_flag = clk_info->rt_flag;
+
+	if (busy)
+		rc = cam_icp_update_clk_busy(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+	else
+		rc = cam_icp_update_clk_free(hw_mgr, ctx_data,
+			hw_mgr_clk_info, clk_info, base_clk);
+
+	CAM_DBG(CAM_ICP, "bc = %d cc = %d busy = %d overclk = %d uc = %d",
+		hw_mgr_clk_info->base_clk, hw_mgr_clk_info->curr_clk,
+		busy, hw_mgr_clk_info->over_clked, rc);
+
+	return rc;
+}
+
+static bool cam_icp_check_bw_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+	bool busy, rc = false;
+	struct cam_icp_clk_bw_request *clk_info;
+	struct cam_icp_clk_info *hw_mgr_clk_info;
+	struct hfi_frame_process_info *frame_info;
+	uint64_t req_id;
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+	else
+		hw_mgr_clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+
+	clk_info = &ctx_data->hfi_frame_process.clk_info[idx];
+	frame_info = &ctx_data->hfi_frame_process;
+	req_id = frame_info->request_id[idx];
+	busy = cam_icp_busy_prev_reqs(frame_info, req_id);
+	rc = cam_icp_update_bw(hw_mgr, ctx_data, hw_mgr_clk_info,
+		clk_info, busy);
+
+	CAM_DBG(CAM_ICP, "ubw = %lld, cbw = %lld, update_bw = %d",
+		hw_mgr_clk_info->uncompressed_bw,
+		hw_mgr_clk_info->compressed_bw, rc);
+
+	return rc;
+}
+
+static int cam_icp_update_clk_rate(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	uint32_t id;
+	uint32_t curr_clk_rate;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_a5_clk_update_cmd clk_upd_cmd;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		dev_intf = bps_dev_intf;
+		curr_clk_rate = hw_mgr->clk_info[ICP_CLK_HW_BPS].curr_clk;
+		id = CAM_ICP_BPS_CMD_UPDATE_CLK;
+	} else {
+		dev_intf = ipe0_dev_intf;
+		curr_clk_rate = hw_mgr->clk_info[ICP_CLK_HW_IPE].curr_clk;
+		id = CAM_ICP_IPE_CMD_UPDATE_CLK;
+	}
+
+	CAM_DBG(CAM_PERF, "clk_rate %u for dev_type %d", curr_clk_rate,
+		ctx_data->icp_dev_acquire_info->dev_type);
+	clk_upd_cmd.curr_clk_rate = curr_clk_rate;
+	clk_upd_cmd.ipe_bps_pc_enable = icp_hw_mgr.ipe_bps_pc_flag;
+
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_upd_cmd, sizeof(struct cam_a5_clk_update_cmd));
+
+	if (ctx_data->icp_dev_acquire_info->dev_type != CAM_ICP_RES_TYPE_BPS)
+		if (ipe1_dev_intf)
+			ipe1_dev_intf->hw_ops.process_cmd(
+				ipe1_dev_intf->hw_priv, id,
+				&clk_upd_cmd,
+				sizeof(struct cam_a5_clk_update_cmd));
+
+	return 0;
+}
+
+static int cam_icp_update_cpas_vote(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	uint32_t id;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	struct cam_hw_intf *dev_intf = NULL;
+	struct cam_icp_clk_info *clk_info;
+	struct cam_icp_cpas_vote clk_update;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to update clk");
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		dev_intf = bps_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_BPS];
+		id = CAM_ICP_BPS_CMD_VOTE_CPAS;
+	} else {
+		dev_intf = ipe0_dev_intf;
+		clk_info = &hw_mgr->clk_info[ICP_CLK_HW_IPE];
+		id = CAM_ICP_IPE_CMD_VOTE_CPAS;
+	}
+
+	clk_update.ahb_vote.type = CAM_VOTE_DYNAMIC;
+	clk_update.ahb_vote.vote.freq = 0;
+	clk_update.ahb_vote_valid = false;
+	clk_update.axi_vote.compressed_bw = clk_info->compressed_bw;
+	clk_update.axi_vote.uncompressed_bw = clk_info->uncompressed_bw;
+	clk_update.axi_vote_valid = true;
+	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv, id,
+		&clk_update, sizeof(clk_update));
+
+	/*
+	 * Consolidated bw needs to be voted on only one IPE client. Otherwise
+	 * total bw that we vote at bus client would be doubled. So either
+	 * remove voting on IPE1 or divide the vote for each IPE client
+	 * and vote to cpas - cpas will add up and vote full bw to sf client
+	 * anyway.
+	 */
+
+	CAM_DBG(CAM_ICP, "compress_bw %llu uncompress_bw %llu dev_type %d",
+		clk_info->compressed_bw, clk_info->uncompressed_bw,
+		ctx_data->icp_dev_acquire_info->dev_type);
+
+	return 0;
+}
+
+static int cam_icp_mgr_ipe_bps_clk_update(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int idx)
+{
+	int rc = 0;
+
+	if (cam_icp_check_clk_update(hw_mgr, ctx_data, idx))
+		rc = cam_icp_update_clk_rate(hw_mgr, ctx_data);
+
+	if (cam_icp_check_bw_update(hw_mgr, ctx_data, idx))
+		rc |= cam_icp_update_cpas_vote(hw_mgr, ctx_data);
+
+	return rc;
+}
+
+static int cam_icp_mgr_ipe_bps_resume(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+	int rc = 0;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return -EINVAL;
+	}
+
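+	/*
+	 * Per-device context refcount: only the first acquire on a device
+	 * actually powers it up; later acquires just bump the count.
+	 */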
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS) {
+		if (hw_mgr->bps_ctxt_cnt++)
+			goto end;
+		if (!hw_mgr->bps_clk_state) {
+			bps_dev_intf->hw_ops.init(
+				bps_dev_intf->hw_priv, NULL, 0);
+			hw_mgr->bps_clk_state = true;
+		}
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
+			bps_dev_intf->hw_ops.process_cmd(
+				bps_dev_intf->hw_priv,
+				CAM_ICP_BPS_CMD_POWER_RESUME, NULL, 0);
+			hw_mgr->core_info = hw_mgr->core_info | ICP_PWR_CLP_BPS;
+		}
+	} else {
+		if (hw_mgr->ipe_ctxt_cnt++)
+			goto end;
+		if (!hw_mgr->ipe_clk_state)
+			ipe0_dev_intf->hw_ops.init(
+				ipe0_dev_intf->hw_priv, NULL, 0);
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
+			ipe0_dev_intf->hw_ops.process_cmd(
+				ipe0_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_POWER_RESUME, NULL, 0);
+		}
+
+		if ((icp_hw_mgr.ipe1_enable) &&
+			(ipe1_dev_intf) &&
+			(!hw_mgr->ipe_clk_state)) {
+			ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+				NULL, 0);
+
+			if (icp_hw_mgr.ipe_bps_pc_flag) {
+				ipe1_dev_intf->hw_ops.process_cmd(
+					ipe1_dev_intf->hw_priv,
+					CAM_ICP_IPE_CMD_POWER_RESUME,
+					NULL, 0);
+			}
+		}
+		hw_mgr->ipe_clk_state = true;
+		if (icp_hw_mgr.ipe_bps_pc_flag) {
+			hw_mgr->core_info = hw_mgr->core_info |
+				(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1);
+		}
+	}
+
+	CAM_DBG(CAM_ICP, "core_info %X",  hw_mgr->core_info);
+	/* icp_pc_flag and the default case both disable ipe/bps pc */
+	if (icp_hw_mgr.ipe_bps_pc_flag)
+		rc = hfi_enable_ipe_bps_pc(true, hw_mgr->core_info);
+	else
+		rc = hfi_enable_ipe_bps_pc(false, hw_mgr->core_info);
+end:
+	return rc;
+}
+
+static int cam_icp_mgr_ipe_bps_power_collapse(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data, int dev_type)
+{
+	int rc = 0, dev;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return -EINVAL;
+	}
+
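+	/*
+	 * ctx_data may be NULL when a device type is being powered down
+	 * directly; in that case dev_type selects the device and no context
+	 * refcount is dropped.
+	 */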
+	if (!ctx_data)
+		dev = dev_type;
+	else
+		dev = ctx_data->icp_dev_acquire_info->dev_type;
+
+	if (dev == CAM_ICP_RES_TYPE_BPS) {
+		CAM_DBG(CAM_ICP, "bps ctx cnt %d", hw_mgr->bps_ctxt_cnt);
+		if (ctx_data)
+			--hw_mgr->bps_ctxt_cnt;
+
+		if (hw_mgr->bps_ctxt_cnt)
+			goto end;
+
+		if (icp_hw_mgr.ipe_bps_pc_flag &&
+			!atomic_read(&hw_mgr->recovery)) {
+			rc = bps_dev_intf->hw_ops.process_cmd(
+				bps_dev_intf->hw_priv,
+				CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+				NULL, 0);
+			hw_mgr->core_info =
+				hw_mgr->core_info & (~ICP_PWR_CLP_BPS);
+		}
+
+		if (hw_mgr->bps_clk_state) {
+			bps_dev_intf->hw_ops.deinit
+				(bps_dev_intf->hw_priv, NULL, 0);
+			hw_mgr->bps_clk_state = false;
+		}
+	} else {
+		CAM_DBG(CAM_ICP, "ipe ctx cnt %d", hw_mgr->ipe_ctxt_cnt);
+		if (ctx_data)
+			--hw_mgr->ipe_ctxt_cnt;
+
+		if (hw_mgr->ipe_ctxt_cnt)
+			goto end;
+
+		if (icp_hw_mgr.ipe_bps_pc_flag &&
+			!atomic_read(&hw_mgr->recovery)) {
+			rc = ipe0_dev_intf->hw_ops.process_cmd(
+				ipe0_dev_intf->hw_priv,
+				CAM_ICP_IPE_CMD_POWER_COLLAPSE, NULL, 0);
+		}
+
+		if (hw_mgr->ipe_clk_state)
+			ipe0_dev_intf->hw_ops.deinit(
+				ipe0_dev_intf->hw_priv, NULL, 0);
+
+		if (ipe1_dev_intf) {
+			if (icp_hw_mgr.ipe_bps_pc_flag &&
+				!atomic_read(&hw_mgr->recovery)) {
+				rc = ipe1_dev_intf->hw_ops.process_cmd(
+					ipe1_dev_intf->hw_priv,
+					CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+					NULL, 0);
+			}
+
+			if (hw_mgr->ipe_clk_state)
+				ipe1_dev_intf->hw_ops.deinit(
+					ipe1_dev_intf->hw_priv, NULL, 0);
+		}
+
+		hw_mgr->ipe_clk_state = false;
+		if (icp_hw_mgr.ipe_bps_pc_flag &&
+			!atomic_read(&hw_mgr->recovery)) {
+			hw_mgr->core_info = hw_mgr->core_info &
+				(~(ICP_PWR_CLP_IPE0 | ICP_PWR_CLP_IPE1));
+		}
+	}
+
+	CAM_DBG(CAM_ICP, "Exit: core_info = %x", hw_mgr->core_info);
+end:
+	return rc;
+}
+
+static int cam_icp_set_dbg_default_clk(void *data, u64 val)
+{
+	icp_hw_mgr.icp_debug_clk = val;
+	return 0;
+}
+
+static int cam_icp_get_dbg_default_clk(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.icp_debug_clk;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_default_clk,
+	cam_icp_get_dbg_default_clk,
+	cam_icp_set_dbg_default_clk, "%16llu");
+
+static int cam_icp_set_a5_dbg_lvl(void *data, u64 val)
+{
+	icp_hw_mgr.a5_dbg_lvl = val;
+	return 0;
+}
+
+static int cam_icp_get_a5_dbg_lvl(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.a5_dbg_lvl;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fs, cam_icp_get_a5_dbg_lvl,
+	cam_icp_set_a5_dbg_lvl, "%08llu");
+
+static int cam_icp_set_a5_dbg_type(void *data, u64 val)
+{
+	if (val <= NUM_HFI_DEBUG_MODE)
+		icp_hw_mgr.a5_debug_type = val;
+	return 0;
+}
+
+static int cam_icp_get_a5_dbg_type(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.a5_debug_type;
+	return 0;
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_type_fs, cam_icp_get_a5_dbg_type,
+	cam_icp_set_a5_dbg_type, "%08llu");
+
+static int cam_icp_set_a5_fw_dump_lvl(void *data, u64 val)
+{
+	if (val < NUM_HFI_DUMP_LVL)
+		icp_hw_mgr.a5_fw_dump_lvl = val;
+	return 0;
+}
+
+static int cam_icp_get_a5_fw_dump_lvl(void *data, u64 *val)
+{
+	*val = icp_hw_mgr.a5_fw_dump_lvl;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_icp_debug_fw_dump, cam_icp_get_a5_fw_dump_lvl,
+	cam_icp_set_a5_fw_dump_lvl, "%08llu");
+
+static int cam_icp_hw_mgr_create_debugfs_entry(void)
+{
+	int rc = 0;
+
+	icp_hw_mgr.dentry = debugfs_create_dir("camera_icp", NULL);
+	if (!icp_hw_mgr.dentry)
+		return -ENOMEM;
+
+	if (!debugfs_create_bool("icp_pc",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.icp_pc_flag)) {
+		CAM_ERR(CAM_ICP, "failed to create icp_pc entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_bool("ipe_bps_pc",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.ipe_bps_pc_flag)) {
+		CAM_ERR(CAM_ICP, "failed to create ipe_bps_pc entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("icp_debug_clk",
+		0644,
+		icp_hw_mgr.dentry, NULL,
+		&cam_icp_debug_default_clk)) {
+		CAM_ERR(CAM_ICP, "failed to create icp_debug_clk entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_bool("a5_jtag_debug",
+		0644,
+		icp_hw_mgr.dentry,
+		&icp_hw_mgr.a5_jtag_debug)) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("a5_debug_type",
+		0644,
+		icp_hw_mgr.dentry,
+		NULL, &cam_icp_debug_type_fs)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_debug_type");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("a5_debug_lvl",
+		0644,
+		icp_hw_mgr.dentry,
+		NULL, &cam_icp_debug_fs)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_dbg_lvl");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	if (!debugfs_create_file("a5_fw_dump_lvl",
+		0644,
+		icp_hw_mgr.dentry,
+		NULL, &cam_icp_debug_fw_dump)) {
+		CAM_ERR(CAM_ICP, "failed to create a5_fw_dump_lvl");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	return rc;
+err:
+	debugfs_remove_recursive(icp_hw_mgr.dentry);
+	icp_hw_mgr.dentry = NULL;
+	return rc;
+}
+
+static int cam_icp_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	struct hfi_cmd_work_data *task_data = NULL;
+	struct cam_icp_hw_mgr *hw_mgr;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid params %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	hw_mgr = priv;
+	task_data = (struct hfi_cmd_work_data *)data;
+
+	rc = hfi_write_cmd(task_data->data);
+
+	return rc;
+}
+
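+/*
+ * Flush all in-flight frames of a context: report each pending request
+ * back through the event callback as failed, destroy any merged input
+ * sync objects (including deferred free resources), and clear the
+ * frame-process bookkeeping.
+ */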
+static int cam_icp_mgr_cleanup_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_hw_done_event_data buf_data;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->request_id[i])
+			continue;
+		buf_data.request_id = hfi_frame_process->request_id[i];
+		ctx_data->ctxt_event_cb(ctx_data->context_priv,
+			false, &buf_data);
+		hfi_frame_process->request_id[i] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[i] > 0) {
+			CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+				ctx_data->hfi_frame_process.in_resource[i]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[i]);
+			ctx_data->hfi_frame_process.in_resource[i] = 0;
+		}
+		hfi_frame_process->fw_process_flag[i] = false;
+		clear_bit(i, ctx_data->hfi_frame_process.bitmap);
+	}
+
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++) {
+		if (!hfi_frame_process->in_free_resource[i])
+			continue;
+
+		CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+			ctx_data->hfi_frame_process.in_free_resource[i]);
+		cam_sync_destroy(
+			ctx_data->hfi_frame_process.in_free_resource[i]);
+		ctx_data->hfi_frame_process.in_free_resource[i] = 0;
+	}
+
+	return 0;
+}
+
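+/*
+ * Handle a frame-process ack from firmware: look up the request in the
+ * context's frame table, signal the event callback with the given flag,
+ * release the merged input sync object, and free the table slot.
+ */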
+static int cam_icp_mgr_handle_frame_process(uint32_t *msg_ptr, int flag)
+{
+	int i;
+	uint32_t idx;
+	uint64_t request_id;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_frame_process_info *hfi_frame_process;
+	struct cam_hw_done_event_data buf_data;
+	uint32_t clk_type;
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+	request_id = ioconfig_ack->user_data2;
+	ctx_data = (struct cam_icp_hw_ctx_data *)
+		U64_TO_PTR(ioconfig_ack->user_data1);
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid Context");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	cam_icp_ctx_timer_reset(ctx_data);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_ICP, "ctx %u is in %d state",
+			ctx_data->ctx_id, ctx_data->state);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return 0;
+	}
+
+	CAM_DBG(CAM_REQ,
+		"ctx_id : %u, request_id :%lld dev_type: %d",
+		ctx_data->ctx_id, request_id,
+		ctx_data->icp_dev_acquire_info->dev_type);
+
+	clk_type = ICP_DEV_TYPE_TO_CLK_TYPE(
+			ctx_data->icp_dev_acquire_info->dev_type);
+	cam_icp_device_timer_reset(&icp_hw_mgr, clk_type);
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		if (hfi_frame_process->request_id[i] == request_id)
+			break;
+
+	if (i >= CAM_FRAME_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "pkt not found in ctx data for req_id =%lld",
+			request_id);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+	idx = i;
+
+	buf_data.request_id = hfi_frame_process->request_id[idx];
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, flag, &buf_data);
+	hfi_frame_process->request_id[idx] = 0;
+	if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+		CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+			ctx_data->hfi_frame_process.in_resource[idx]);
+		cam_sync_destroy(ctx_data->hfi_frame_process.in_resource[idx]);
+		ctx_data->hfi_frame_process.in_resource[idx] = 0;
+	}
+	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	hfi_frame_process->fw_process_flag[idx] = false;
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return 0;
+}
+
+static int cam_icp_mgr_process_msg_frame_process(uint32_t *msg_ptr)
+{
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_msg_frame_process_done *frame_done;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, "msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+	if (ioconfig_ack->err_type != HFI_ERR_SYS_NONE) {
+		CAM_ERR(CAM_ICP, "failed with error: %u",
+			ioconfig_ack->err_type);
+		return -EIO;
+	}
+
+	frame_done =
+		(struct hfi_msg_frame_process_done *)ioconfig_ack->msg_data;
+	if (!frame_done) {
+		cam_icp_mgr_handle_frame_process(msg_ptr,
+			ICP_FRAME_PROCESS_FAILURE);
+		return -EINVAL;
+	}
+
+	if (frame_done->result)
+		return cam_icp_mgr_handle_frame_process(msg_ptr,
+			ICP_FRAME_PROCESS_FAILURE);
+	else
+		return cam_icp_mgr_handle_frame_process(msg_ptr,
+			ICP_FRAME_PROCESS_SUCCESS);
+}
+
+static int cam_icp_mgr_process_msg_config_io(uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct hfi_msg_ipe_config *ipe_config_ack = NULL;
+	struct hfi_msg_bps_common *bps_config_ack = NULL;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, "msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+
+	if (ioconfig_ack->opcode == HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO) {
+		ipe_config_ack =
+			(struct hfi_msg_ipe_config *)(ioconfig_ack->msg_data);
+		if (ipe_config_ack->rc) {
+			CAM_ERR(CAM_ICP, "rc = %d err = %u",
+				ipe_config_ack->rc, ioconfig_ack->err_type);
+			return -EIO;
+		}
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from IPE response");
+			return -EINVAL;
+		}
+		ctx_data->scratch_mem_size = ipe_config_ack->scratch_mem_size;
+	} else {
+		bps_config_ack =
+			(struct hfi_msg_bps_common *)(ioconfig_ack->msg_data);
+		if (bps_config_ack->rc) {
+			CAM_ERR(CAM_ICP, "rc : %u, opcode :%u",
+				bps_config_ack->rc, ioconfig_ack->opcode);
+			return -EIO;
+		}
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from BPS response");
+			return -EINVAL;
+		}
+	}
+	complete(&ctx_data->wait_complete);
+
+	return 0;
+}
+
+static int cam_icp_mgr_process_msg_create_handle(uint32_t *msg_ptr)
+{
+	struct hfi_msg_create_handle_ack *create_handle_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	int rc = 0;
+
+	create_handle_ack = (struct hfi_msg_create_handle_ack *)msg_ptr;
+	if (!create_handle_ack) {
+		CAM_ERR(CAM_ICP, "Invalid create_handle_ack");
+		return -EINVAL;
+	}
+
+	ctx_data =
+		(struct cam_icp_hw_ctx_data *)(uintptr_t)
+		create_handle_ack->user_data1;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_data");
+		return -EINVAL;
+	}
+
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE) {
+		ctx_data->fw_handle = create_handle_ack->fw_handle;
+		CAM_DBG(CAM_ICP, "fw_handle = %x", ctx_data->fw_handle);
+	} else {
+		CAM_WARN(CAM_ICP,
+			"This ctx is no longer in use current state: %d",
+			ctx_data->state);
+		ctx_data->fw_handle = 0;
+		rc = -EPERM;
+	}
+	complete(&ctx_data->wait_complete);
+	return rc;
+}
+
+static int cam_icp_mgr_process_msg_ping_ack(uint32_t *msg_ptr)
+{
+	struct hfi_msg_ping_ack *ping_ack = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	ping_ack = (struct hfi_msg_ping_ack *)msg_ptr;
+	if (!ping_ack) {
+		CAM_ERR(CAM_ICP, "Empty ping ack message");
+		return -EINVAL;
+	}
+
+	ctx_data = (struct cam_icp_hw_ctx_data *)
+		U64_TO_PTR(ping_ack->user_data);
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Invalid ctx_data");
+		return -EINVAL;
+	}
+
+	if (ctx_data->state == CAM_ICP_CTX_STATE_IN_USE)
+		complete(&ctx_data->wait_complete);
+
+	return 0;
+}
+
+static int cam_icp_mgr_process_indirect_ack_msg(uint32_t *msg_ptr)
+{
+	int rc;
+
+	if (!msg_ptr) {
+		CAM_ERR(CAM_ICP, "msg ptr is NULL");
+		return -EINVAL;
+	}
+
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO:
+		CAM_DBG(CAM_ICP, "received IPE/BPS CONFIG_IO");
+		rc = cam_icp_mgr_process_msg_config_io(msg_ptr);
+		if (rc)
+			return rc;
+		break;
+
+	case HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS:
+		rc = cam_icp_mgr_process_msg_frame_process(msg_ptr);
+		if (rc)
+			return rc;
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
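+/*
+ * Handle direct acks (IPE/BPS abort and destroy) by completing the
+ * waiting context, provided it is still in a state that expects the ack.
+ */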
+static int cam_icp_mgr_process_direct_ack_msg(uint32_t *msg_ptr)
+{
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct hfi_msg_ipebps_async_ack *ioconfig_ack = NULL;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	int rc = 0;
+
+	a5_dev_intf = icp_hw_mgr.a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+	switch (msg_ptr[ICP_PACKET_OPCODE]) {
+	case HFI_IPEBPS_CMD_OPCODE_IPE_ABORT:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_ABORT:
+		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from ABORT ack");
+			rc = -EINVAL;
+			break;
+		}
+		if (ctx_data->state != CAM_ICP_CTX_STATE_FREE)
+			complete(&ctx_data->wait_complete);
+		CAM_DBG(CAM_ICP, "received IPE/BPS ABORT: ctx_state = %d",
+			ctx_data->state);
+		break;
+	case HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY:
+	case HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY:
+		ioconfig_ack = (struct hfi_msg_ipebps_async_ack *)msg_ptr;
+		ctx_data = (struct cam_icp_hw_ctx_data *)
+			U64_TO_PTR(ioconfig_ack->user_data1);
+		if (!ctx_data) {
+			CAM_ERR(CAM_ICP, "wrong ctx data from DESTROY ack");
+			rc = -EINVAL;
+			break;
+		}
+		if ((ctx_data->state == CAM_ICP_CTX_STATE_RELEASE) ||
+			(ctx_data->state == CAM_ICP_CTX_STATE_IN_USE))
+			complete(&ctx_data->wait_complete);
+		CAM_DBG(CAM_ICP, "received IPE/BPS DESTROY: ctx_state = %d",
+			ctx_data->state);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid opcode : %u",
+			msg_ptr[ICP_PACKET_OPCODE]);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int cam_icp_ipebps_reset(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *ipe0_dev_intf;
+	struct cam_hw_intf *ipe1_dev_intf;
+	struct cam_hw_intf *bps_dev_intf;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	rc = bps_dev_intf->hw_ops.process_cmd(
+		bps_dev_intf->hw_priv,
+		CAM_ICP_BPS_CMD_RESET,
+		NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ICP, "bps reset failed");
+
+	rc = ipe0_dev_intf->hw_ops.process_cmd(
+		ipe0_dev_intf->hw_priv,
+		CAM_ICP_IPE_CMD_RESET,
+		NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ICP, "ipe0 reset failed");
+
+	if (ipe1_dev_intf) {
+		rc = ipe1_dev_intf->hw_ops.process_cmd(
+			ipe1_dev_intf->hw_priv,
+			CAM_ICP_IPE_CMD_RESET,
+			NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_ICP, "ipe1 reset failed");
+	}
+
+	return 0;
+}
+
+static int cam_icp_mgr_trigger_recovery(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct sfr_buf *sfr_buffer = NULL;
+
+	CAM_DBG(CAM_ICP, "Enter");
+
+	if (atomic_read(&hw_mgr->recovery)) {
+		CAM_ERR(CAM_ICP, "Recovery is set");
+		return rc;
+	}
+
+	sfr_buffer = (struct sfr_buf *)icp_hw_mgr.hfi_mem.sfr_buf.kva;
+	CAM_WARN(CAM_ICP, "SFR:%s", sfr_buffer->msg);
+
+	cam_icp_ipebps_reset(hw_mgr);
+
+	atomic_set(&hw_mgr->recovery, 1);
+	CAM_DBG(CAM_ICP, "Done");
+	return rc;
+}
+
+static int cam_icp_mgr_process_fatal_error(
+	struct cam_icp_hw_mgr *hw_mgr, uint32_t *msg_ptr)
+{
+	struct hfi_msg_event_notify *event_notify;
+	int rc = 0;
+
+	CAM_DBG(CAM_ICP, "Enter");
+
+	event_notify = (struct hfi_msg_event_notify *)msg_ptr;
+	if (!event_notify) {
+		CAM_ERR(CAM_ICP, "Empty event message");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "evt_id: %u evt_data1: %u evt_data2: %u",
+		event_notify->event_id,
+		event_notify->event_data1,
+		event_notify->event_data2);
+
+	if (event_notify->event_id == HFI_EVENT_SYS_ERROR) {
+		CAM_INFO(CAM_ICP, "received HFI_EVENT_SYS_ERROR");
+		rc = cam_icp_mgr_trigger_recovery(hw_mgr);
+	}
+
+	return rc;
+}
+
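+/*
+ * Drain the HFI debug queue and forward the text and timestamp of each
+ * HFI_MSG_SYS_DEBUG packet to the cam_icp_fw_dbg trace point.
+ */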
+static void cam_icp_mgr_process_dbg_buf(void)
+{
+	uint32_t *msg_ptr = NULL, *pkt_ptr = NULL;
+	struct hfi_msg_debug *dbg_msg;
+	uint32_t read_len, size_processed = 0;
+	uint64_t timestamp = 0;
+	char *dbg_buf;
+	int rc = 0;
+
+	rc = hfi_read_message(icp_hw_mgr.dbg_buf, Q_DBG, &read_len);
+	if (rc)
+		return;
+
+	msg_ptr = (uint32_t *)icp_hw_mgr.dbg_buf;
+	while (true) {
+		pkt_ptr = msg_ptr;
+		if (pkt_ptr[ICP_PACKET_TYPE] == HFI_MSG_SYS_DEBUG) {
+			dbg_msg = (struct hfi_msg_debug *)pkt_ptr;
+			dbg_buf = (char *)&dbg_msg->msg_data;
+			timestamp = ((((uint64_t)(dbg_msg->timestamp_hi) << 32)
+				| dbg_msg->timestamp_lo) >> 16);
+			trace_cam_icp_fw_dbg(dbg_buf, timestamp/2);
+		}
+		size_processed += (pkt_ptr[ICP_PACKET_SIZE] >>
+			BYTE_WORD_SHIFT);
+		if (size_processed >= read_len)
+			return;
+		msg_ptr += (pkt_ptr[ICP_PACKET_SIZE] >>
+			BYTE_WORD_SHIFT);
+		pkt_ptr = NULL;
+		dbg_msg = NULL;
+		dbg_buf = NULL;
+	}
+}
+
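+/*
+ * Dispatch a single packet from the HFI message queue based on its
+ * packet type and report the packet size so the caller can advance to
+ * the next packet.
+ */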
+static int cam_icp_process_msg_pkt_type(
+	struct cam_icp_hw_mgr *hw_mgr,
+	uint32_t *msg_ptr,
+	uint32_t *msg_processed_len)
+{
+	int rc = 0;
+	int size_processed = 0;
+
+	switch (msg_ptr[ICP_PACKET_TYPE]) {
+	case HFI_MSG_SYS_INIT_DONE:
+		CAM_DBG(CAM_ICP, "received SYS_INIT_DONE");
+		complete(&hw_mgr->a5_complete);
+		size_processed = (
+			(struct hfi_msg_init_done *)msg_ptr)->size;
+		break;
+
+	case HFI_MSG_SYS_PC_PREP_DONE:
+		CAM_DBG(CAM_ICP, "received SYS_PC_PREP_DONE");
+		complete(&hw_mgr->a5_complete);
+		size_processed = sizeof(struct hfi_msg_pc_prep_done);
+		break;
+
+	case HFI_MSG_SYS_PING_ACK:
+		CAM_DBG(CAM_ICP, "received SYS_PING_ACK");
+		rc = cam_icp_mgr_process_msg_ping_ack(msg_ptr);
+		size_processed = sizeof(struct hfi_msg_ping_ack);
+		break;
+
+	case HFI_MSG_IPEBPS_CREATE_HANDLE_ACK:
+		CAM_DBG(CAM_ICP, "received IPEBPS_CREATE_HANDLE_ACK");
+		rc = cam_icp_mgr_process_msg_create_handle(msg_ptr);
+		size_processed = sizeof(struct hfi_msg_create_handle_ack);
+		break;
+
+	case HFI_MSG_IPEBPS_ASYNC_COMMAND_INDIRECT_ACK:
+		CAM_DBG(CAM_ICP, "received ASYNC_INDIRECT_ACK");
+		rc = cam_icp_mgr_process_indirect_ack_msg(msg_ptr);
+		size_processed = (
+			(struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
+		break;
+
+	case  HFI_MSG_IPEBPS_ASYNC_COMMAND_DIRECT_ACK:
+		CAM_DBG(CAM_ICP, "received ASYNC_DIRECT_ACK");
+		rc = cam_icp_mgr_process_direct_ack_msg(msg_ptr);
+		size_processed = (
+			(struct hfi_msg_ipebps_async_ack *)msg_ptr)->size;
+		break;
+
+	case HFI_MSG_EVENT_NOTIFY:
+		CAM_DBG(CAM_ICP, "received EVENT_NOTIFY");
+		size_processed = (
+			(struct hfi_msg_event_notify *)msg_ptr)->size;
+		rc = cam_icp_mgr_process_fatal_error(hw_mgr, msg_ptr);
+		if (rc)
+			CAM_ERR(CAM_ICP, "failed in processing evt notify");
+
+		break;
+
+	default:
+		CAM_ERR(CAM_ICP, "invalid msg : %u",
+			msg_ptr[ICP_PACKET_TYPE]);
+		rc = -EINVAL;
+		break;
+	}
+
+	*msg_processed_len = size_processed;
+	return rc;
+}
+
+static int32_t cam_icp_mgr_process_msg(void *priv, void *data)
+{
+	uint32_t read_len, msg_processed_len;
+	uint32_t *msg_ptr = NULL;
+	struct hfi_msg_work_data *task_data;
+	struct cam_icp_hw_mgr *hw_mgr;
+	int rc = 0;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid data");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = priv;
+
+	rc = hfi_read_message(icp_hw_mgr.msg_buf, Q_MSG, &read_len);
+	if (rc) {
+		CAM_DBG(CAM_ICP, "Unable to read msg q rc %d", rc);
+	} else {
+		read_len = read_len << BYTE_WORD_SHIFT;
+		msg_ptr = (uint32_t *)icp_hw_mgr.msg_buf;
+		while (true) {
+			cam_icp_process_msg_pkt_type(hw_mgr, msg_ptr,
+				&msg_processed_len);
+
+			if (!msg_processed_len) {
+				CAM_ERR(CAM_ICP,
+					"invalid msg: zero processed length");
+				rc = -EINVAL;
+				break;
+			}
+
+			read_len -= msg_processed_len;
+			if (read_len > 0) {
+				msg_ptr += (msg_processed_len >>
+					BYTE_WORD_SHIFT);
+				msg_processed_len = 0;
+			} else {
+				break;
+			}
+		}
+	}
+
+	if (icp_hw_mgr.a5_debug_type ==
+		HFI_DEBUG_MODE_QUEUE)
+		cam_icp_mgr_process_dbg_buf();
+
+	if ((task_data->irq_status & A5_WDT_0) ||
+		(task_data->irq_status & A5_WDT_1)) {
+		CAM_ERR_RATE_LIMIT(CAM_ICP, "watchdog interrupt from A5");
+
+		rc = cam_icp_mgr_trigger_recovery(hw_mgr);
+	}
+
+	return rc;
+}
+
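+/*
+ * A5 IRQ callback. Runs in interrupt context, so it only captures the
+ * IRQ status into a workq task and defers message-queue processing to
+ * cam_icp_mgr_process_msg().
+ */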
+int32_t cam_icp_hw_mgr_cb(uint32_t irq_status, void *data)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+	struct cam_icp_hw_mgr *hw_mgr = data;
+	struct crm_workq_task *task;
+	struct hfi_msg_work_data *task_data;
+
+	if (!data) {
+		CAM_ERR(CAM_ICP, "irq cb data is NULL");
+		return rc;
+	}
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.msg_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_msg_work_data *)task->payload;
+	task_data->data = hw_mgr;
+	task_data->irq_status = irq_status;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_mgr_process_msg;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+static void cam_icp_free_hfi_mem(void)
+{
+	int rc;
+
+	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+	rc = cam_mem_mgr_free_memory_region(&icp_hw_mgr.hfi_mem.sec_heap);
+	if (rc)
+		CAM_ERR(CAM_ICP, "failed to unreserve sec heap");
+
+	cam_smmu_dealloc_qdss(icp_hw_mgr.iommu_hdl);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+}
+
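+/*
+ * Look up the SECHEAP SMMU region and reserve it as the firmware
+ * secondary heap.
+ */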
+static int cam_icp_alloc_secheap_mem(struct cam_mem_mgr_memory_desc *secheap)
+{
+	int rc;
+	struct cam_mem_mgr_request_desc alloc;
+	struct cam_mem_mgr_memory_desc out;
+	struct cam_smmu_region_info secheap_info;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(&out, 0, sizeof(out));
+
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_SMMU_REGION_SECHEAP,
+		&secheap_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get secheap memory info");
+		return rc;
+	}
+
+	alloc.size = secheap_info.iova_len;
+	alloc.align = 0;
+	alloc.flags = 0;
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_reserve_memory_region(&alloc,
+		CAM_SMMU_REGION_SECHEAP,
+		&out);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to reserve secheap memory");
+		return rc;
+	}
+
+	*secheap = out;
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+		out.kva, out.iova, out.mem_handle, out.len);
+
+	return rc;
+}
+
+static int cam_icp_alloc_sfr_mem(struct cam_mem_mgr_memory_desc *sfr)
+{
+	int rc;
+	struct cam_mem_mgr_request_desc alloc;
+	struct cam_mem_mgr_memory_desc out;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(&out, 0, sizeof(out));
+	alloc.size = SZ_8K;
+	alloc.align = 0;
+	alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+		CAM_MEM_FLAG_HW_SHARED_ACCESS;
+
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_request_mem(&alloc, &out);
+	if (rc)
+		return rc;
+
+	*sfr = out;
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+		out.kva, out.iova, out.mem_handle, out.len);
+
+	return rc;
+}
+
+static int cam_icp_alloc_shared_mem(struct cam_mem_mgr_memory_desc *qtbl)
+{
+	int rc;
+	struct cam_mem_mgr_request_desc alloc;
+	struct cam_mem_mgr_memory_desc out;
+
+	memset(&alloc, 0, sizeof(alloc));
+	memset(&out, 0, sizeof(out));
+	alloc.size = SZ_1M;
+	alloc.align = 0;
+	alloc.flags = CAM_MEM_FLAG_HW_READ_WRITE |
+		CAM_MEM_FLAG_HW_SHARED_ACCESS;
+	alloc.smmu_hdl = icp_hw_mgr.iommu_hdl;
+	rc = cam_mem_mgr_request_mem(&alloc, &out);
+	if (rc)
+		return rc;
+
+	*qtbl = out;
+	CAM_DBG(CAM_ICP, "kva: %llX, iova: %x, hdl: %x, len: %lld",
+		out.kva, out.iova, out.mem_handle, out.len);
+
+	return rc;
+}
+
+static int cam_icp_allocate_fw_mem(void)
+{
+	int rc;
+	uintptr_t kvaddr;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_alloc_firmware(icp_hw_mgr.iommu_hdl,
+		&iova, &kvaddr, &len);
+	if (rc)
+		return -ENOMEM;
+
+	icp_hw_mgr.hfi_mem.fw_buf.len = len;
+	icp_hw_mgr.hfi_mem.fw_buf.kva = kvaddr;
+	icp_hw_mgr.hfi_mem.fw_buf.iova = iova;
+	icp_hw_mgr.hfi_mem.fw_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+	CAM_DBG(CAM_ICP, "kva: %zX, iova: %llx, len: %zu",
+		kvaddr, iova, len);
+
+	return rc;
+}
+
+static int cam_icp_allocate_qdss_mem(void)
+{
+	int rc;
+	size_t len;
+	dma_addr_t iova;
+
+	rc = cam_smmu_alloc_qdss(icp_hw_mgr.iommu_hdl,
+		&iova, &len);
+	if (rc)
+		return rc;
+
+	icp_hw_mgr.hfi_mem.qdss_buf.len = len;
+	icp_hw_mgr.hfi_mem.qdss_buf.iova = iova;
+	icp_hw_mgr.hfi_mem.qdss_buf.smmu_hdl = icp_hw_mgr.iommu_hdl;
+
+	CAM_DBG(CAM_ICP, "iova: %llx, len: %zu", iova, len);
+
+	return rc;
+}
+
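+/*
+ * Allocate all HFI memory regions (firmware, QDSS, queue table, the
+ * three queues, SFR buffer and secondary heap), unwinding in reverse
+ * order if any allocation fails.
+ */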
+static int cam_icp_allocate_hfi_mem(void)
+{
+	int rc;
+
+	rc = cam_smmu_get_region_info(icp_hw_mgr.iommu_hdl,
+		CAM_SMMU_REGION_SHARED,
+		&icp_hw_mgr.hfi_mem.shmem);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to get shared memory info");
+		return rc;
+	}
+
+	rc = cam_icp_allocate_fw_mem();
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate FW memory");
+		return rc;
+	}
+
+	rc = cam_icp_allocate_qdss_mem();
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate qdss memory");
+		goto fw_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.qtbl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate qtbl memory");
+		goto qtbl_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate cmd q memory");
+		goto cmd_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.msg_q);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate msg q memory");
+		goto msg_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_shared_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate dbg q memory");
+		goto dbg_q_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_sfr_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate sfr buffer");
+		goto sfr_buf_alloc_failed;
+	}
+
+	rc = cam_icp_alloc_secheap_mem(&icp_hw_mgr.hfi_mem.sec_heap);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "Unable to allocate sec heap memory");
+		goto sec_heap_alloc_failed;
+	}
+
+	return rc;
+sec_heap_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.sfr_buf);
+sfr_buf_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.dbg_q);
+dbg_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.msg_q);
+msg_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.cmd_q);
+cmd_q_alloc_failed:
+	cam_mem_mgr_release_mem(&icp_hw_mgr.hfi_mem.qtbl);
+qtbl_alloc_failed:
+	cam_smmu_dealloc_qdss(icp_hw_mgr.iommu_hdl);
+fw_alloc_failed:
+	cam_smmu_dealloc_firmware(icp_hw_mgr.iommu_hdl);
+	return rc;
+}
+
+static int cam_icp_mgr_get_free_ctx(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int i = 0;
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++) {
+		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+		if (hw_mgr->ctx_data[i].state == CAM_ICP_CTX_STATE_FREE) {
+			hw_mgr->ctx_data[i].state = CAM_ICP_CTX_STATE_IN_USE;
+			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+			break;
+		}
+		mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+	}
+
+	return i;
+}
+
+static void cam_icp_mgr_put_ctx(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	ctx_data->state = CAM_ICP_CTX_STATE_FREE;
+}
+
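+/*
+ * Ask firmware to prepare for power collapse and wait up to 5 seconds
+ * for the PC_PREP_DONE ack.
+ */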
+static int cam_icp_mgr_send_pc_prep(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+		return -EINVAL;
+	}
+
+	reinit_completion(&hw_mgr->a5_complete);
+	CAM_DBG(CAM_ICP, "Sending PC prep command");
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv, CAM_ICP_A5_CMD_PC_PREP, NULL, 0);
+	if (rc)
+		return rc;
+
+	CAM_DBG(CAM_ICP, "Wait for PC_PREP_DONE Message\n");
+	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+		msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "PC_PREP response timed out %d\n", rc);
+	}
+	CAM_DBG(CAM_ICP, "Done Waiting for PC_PREP Message\n");
+
+	return rc;
+}
+
+static int cam_ipe_bps_deint(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+	if ((!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return 0;
+	}
+
+	if (ipe1_dev_intf && hw_mgr->ipe_clk_state) {
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv,
+				NULL, 0);
+	}
+
+	if (hw_mgr->ipe_clk_state)
+		ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	if (hw_mgr->bps_clk_state)
+		bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+
+	hw_mgr->bps_clk_state = false;
+	hw_mgr->ipe_clk_state = false;
+
+	return 0;
+}
+
+static int cam_icp_mgr_hw_close_u(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+	int rc = 0;
+
+	CAM_DBG(CAM_ICP, "UMD calls close");
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_icp_mgr_hw_close(hw_mgr, NULL);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_icp_mgr_hw_close_k(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+
+	CAM_DBG(CAM_ICP, "KMD calls close");
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	return cam_icp_mgr_hw_close(hw_mgr, NULL);
+}
+
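+/*
+ * Power-collapse the ICP. If power collapse is disabled or recovery is
+ * pending, the A5 CPU is disabled and the hw mgr closed; otherwise
+ * firmware is asked to prepare for power collapse before the CPU is
+ * disabled. The A5 device is deinitialized in both cases.
+ */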
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+
+	CAM_DBG(CAM_ICP, "ENTER");
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	if (!hw_mgr->icp_pc_flag || atomic_read(&hw_mgr->recovery)) {
+		cam_hfi_disable_cpu(
+			a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+		rc = cam_icp_mgr_hw_close_k(hw_mgr, NULL);
+	} else {
+		CAM_DBG(CAM_PERF, "Sending PC prep ICP PC enabled");
+		rc = cam_icp_mgr_send_pc_prep(hw_mgr);
+		cam_hfi_disable_cpu(
+			a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+	}
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+	CAM_DBG(CAM_ICP, "EXIT");
+
+	return rc;
+}
+
+static int cam_icp_mgr_hfi_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct hfi_mem_info hfi_mem;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid\n");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+	hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+	CAM_DBG(CAM_ICP, "qtbl kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.qtbl.kva, hfi_mem.qtbl.iova, hfi_mem.qtbl.len);
+
+	hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+	hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+	hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+	CAM_DBG(CAM_ICP, "cmd_q kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.cmd_q.kva, hfi_mem.cmd_q.iova, hfi_mem.cmd_q.len);
+
+	hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+	hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+	hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+	CAM_DBG(CAM_ICP, "msg_q kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.msg_q.kva, hfi_mem.msg_q.iova, hfi_mem.msg_q.len);
+
+	hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+	hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+	hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+	CAM_DBG(CAM_ICP, "dbg_q kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.dbg_q.kva, hfi_mem.dbg_q.iova, hfi_mem.dbg_q.len);
+
+	hfi_mem.sfr_buf.kva = icp_hw_mgr.hfi_mem.sfr_buf.kva;
+	hfi_mem.sfr_buf.iova = icp_hw_mgr.hfi_mem.sfr_buf.iova;
+	hfi_mem.sfr_buf.len = icp_hw_mgr.hfi_mem.sfr_buf.len;
+	CAM_DBG(CAM_ICP, "sfr kva = %llX IOVA = %X length = %lld\n",
+		hfi_mem.sfr_buf.kva, hfi_mem.sfr_buf.iova,
+		hfi_mem.sfr_buf.len);
+
+	hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
+	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
+	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+	return cam_hfi_resume(&hfi_mem,
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+		hw_mgr->a5_jtag_debug);
+}
+
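+/*
+ * Send a direct IPE/BPS abort command for the context's firmware handle
+ * and wait up to 100 ms for the ack.
+ */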
+static int cam_icp_mgr_abort_handle(
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	unsigned long rem_jiffies;
+	size_t packet_size;
+	int timeout = 100;
+	struct hfi_cmd_ipebps_async *abort_cmd;
+
+	packet_size =
+		sizeof(struct hfi_cmd_ipebps_async) +
+		sizeof(struct hfi_cmd_abort) -
+		sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+	CAM_DBG(CAM_ICP, "abort pkt size = %d", (int)packet_size);
+	abort_cmd = kzalloc(packet_size, GFP_KERNEL);
+	if (!abort_cmd)
+		return -ENOMEM;
+
+	abort_cmd->size = packet_size;
+	abort_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		abort_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_ABORT;
+	else
+		abort_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_ABORT;
+
+	reinit_completion(&ctx_data->wait_complete);
+	abort_cmd->num_fw_handles = 1;
+	abort_cmd->fw_handles[0] = ctx_data->fw_handle;
+	abort_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	abort_cmd->user_data2 = (uint64_t)0x0;
+
+	rc = hfi_write_cmd(abort_cmd);
+	if (rc) {
+		kfree(abort_cmd);
+		return rc;
+	}
+	CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
+		ctx_data->fw_handle, ctx_data);
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW timeout/err in abort handle command");
+		cam_hfi_queue_dump();
+	}
+
+	kfree(abort_cmd);
+	return rc;
+}
+
+static int cam_icp_mgr_destroy_handle(
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	int timeout = 100;
+	unsigned long rem_jiffies;
+	size_t packet_size;
+	struct hfi_cmd_ipebps_async *destroy_cmd;
+
+	packet_size =
+		sizeof(struct hfi_cmd_ipebps_async) +
+		sizeof(struct hfi_cmd_abort_destroy) -
+		sizeof(((struct hfi_cmd_ipebps_async *)0)->payload.direct);
+	destroy_cmd = kzalloc(packet_size, GFP_KERNEL);
+	if (!destroy_cmd) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	destroy_cmd->size = packet_size;
+	destroy_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_DIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		destroy_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_DESTROY;
+	else
+		destroy_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_DESTROY;
+
+	reinit_completion(&ctx_data->wait_complete);
+	destroy_cmd->num_fw_handles = 1;
+	destroy_cmd->fw_handles[0] = ctx_data->fw_handle;
+	destroy_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	destroy_cmd->user_data2 = (uint64_t)0x0;
+	memcpy(destroy_cmd->payload.direct, &ctx_data->temp_payload,
+		sizeof(uint64_t));
+
+	rc = hfi_write_cmd(destroy_cmd);
+	if (rc) {
+		kfree(destroy_cmd);
+		return rc;
+	}
+	CAM_DBG(CAM_ICP, "fw_handle = %x ctx_data = %pK",
+		ctx_data->fw_handle, ctx_data);
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timeout: %d for %u",
+			rc, ctx_data->ctx_id);
+		if (icp_hw_mgr.a5_debug_type ==
+			HFI_DEBUG_MODE_QUEUE)
+			cam_icp_mgr_process_dbg_buf();
+		cam_hfi_queue_dump();
+	}
+	kfree(destroy_cmd);
+	return rc;
+}
+
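+/*
+ * Tear down one acquired context: power-collapse its device, abort and
+ * destroy its firmware handle, flush pending frames, and return the
+ * context slot to the free pool.
+ */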
+static int cam_icp_mgr_release_ctx(struct cam_icp_hw_mgr *hw_mgr, int ctx_id)
+{
+	int i = 0;
+
+	if (ctx_id >= CAM_ICP_CTX_MAX) {
+		CAM_ERR(CAM_ICP, "ctx_id is wrong: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	if (hw_mgr->ctx_data[ctx_id].state !=
+		CAM_ICP_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+		CAM_DBG(CAM_ICP,
+			"ctx with id: %d not in right state to release: %d",
+			ctx_id, hw_mgr->ctx_data[ctx_id].state);
+		return 0;
+	}
+	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr,
+		&hw_mgr->ctx_data[ctx_id], 0);
+	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_RELEASE;
+	CAM_DBG(CAM_ICP, "E: ctx_id = %d recovery = %d",
+		ctx_id, atomic_read(&hw_mgr->recovery));
+	cam_icp_mgr_abort_handle(&hw_mgr->ctx_data[ctx_id]);
+	cam_icp_mgr_destroy_handle(&hw_mgr->ctx_data[ctx_id]);
+	cam_icp_mgr_cleanup_ctx(&hw_mgr->ctx_data[ctx_id]);
+
+	hw_mgr->ctx_data[ctx_id].fw_handle = 0;
+	hw_mgr->ctx_data[ctx_id].scratch_mem_size = 0;
+	for (i = 0; i < CAM_FRAME_CMD_MAX; i++)
+		clear_bit(i, hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+	kfree(hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap);
+	hw_mgr->ctx_data[ctx_id].hfi_frame_process.bitmap = NULL;
+	cam_icp_hw_mgr_clk_info_update(hw_mgr, &hw_mgr->ctx_data[ctx_id]);
+	hw_mgr->ctx_data[ctx_id].clk_info.curr_fc = 0;
+	hw_mgr->ctx_data[ctx_id].clk_info.base_clk = 0;
+	hw_mgr->ctxt_cnt--;
+	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+	hw_mgr->ctx_data[ctx_id].state = CAM_ICP_CTX_STATE_FREE;
+	cam_icp_ctx_timer_stop(&hw_mgr->ctx_data[ctx_id]);
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
+	CAM_DBG(CAM_ICP, "X: ctx_id = %d", ctx_id);
+	return 0;
+}
+
+static void cam_icp_mgr_device_deinit(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong, failed to close");
+		return;
+	}
+
+	if (ipe1_dev_intf)
+		ipe1_dev_intf->hw_ops.deinit(ipe1_dev_intf->hw_priv, NULL, 0);
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+	hw_mgr->bps_clk_state = false;
+	hw_mgr->ipe_clk_state = false;
+}
+
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+	int rc = 0;
+
+	CAM_DBG(CAM_ICP, "E");
+	if (!hw_mgr->fw_download) {
+		CAM_DBG(CAM_ICP, "hw mgr is already closed");
+		return 0;
+	}
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_DBG(CAM_ICP, "a5_dev_intf is NULL");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+	fw_buf_info.kva = 0;
+	fw_buf_info.iova = 0;
+	fw_buf_info.len = 0;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_SET_FW_BUF,
+		&fw_buf_info,
+		sizeof(fw_buf_info));
+	if (rc)
+		CAM_ERR(CAM_ICP, "nullify the fw buf failed");
+	cam_hfi_deinit(
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+	irq_cb.icp_hw_mgr_cb = NULL;
+	irq_cb.data = NULL;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc)
+		CAM_ERR(CAM_ICP, "deregister irq call back failed");
+
+	cam_icp_free_hfi_mem();
+	hw_mgr->fw_download = false;
+
+	CAM_DBG(CAM_ICP, "Exit");
+	return rc;
+}
+
+static int cam_icp_mgr_device_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_intf *ipe0_dev_intf = NULL;
+	struct cam_hw_intf *ipe1_dev_intf = NULL;
+	struct cam_hw_intf *bps_dev_intf = NULL;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	ipe0_dev_intf = hw_mgr->ipe0_dev_intf;
+	ipe1_dev_intf = hw_mgr->ipe1_dev_intf;
+	bps_dev_intf = hw_mgr->bps_dev_intf;
+
+	if ((!a5_dev_intf) || (!ipe0_dev_intf) || (!bps_dev_intf)) {
+		CAM_ERR(CAM_ICP, "dev intfs are wrong");
+		return -EINVAL;
+	}
+
+	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		goto a5_dev_init_failed;
+
+	rc = bps_dev_intf->hw_ops.init(bps_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		goto bps_dev_init_failed;
+
+	rc = ipe0_dev_intf->hw_ops.init(ipe0_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		goto ipe0_dev_init_failed;
+
+	if (ipe1_dev_intf) {
+		rc = ipe1_dev_intf->hw_ops.init(ipe1_dev_intf->hw_priv,
+						NULL, 0);
+		if (rc)
+			goto ipe1_dev_init_failed;
+	}
+
+	hw_mgr->bps_clk_state = true;
+	hw_mgr->ipe_clk_state = true;
+
+	return rc;
+ipe1_dev_init_failed:
+	ipe0_dev_intf->hw_ops.deinit(ipe0_dev_intf->hw_priv, NULL, 0);
+	hw_mgr->ipe_clk_state = false;
+ipe0_dev_init_failed:
+	bps_dev_intf->hw_ops.deinit(bps_dev_intf->hw_priv, NULL, 0);
+	hw_mgr->bps_clk_state = false;
+bps_dev_init_failed:
+	a5_dev_intf->hw_ops.deinit(a5_dev_intf->hw_priv, NULL, 0);
+a5_dev_init_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_fw_download(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_a5_set_irq_cb irq_cb;
+	struct cam_icp_a5_set_fw_buf_info fw_buf_info;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	irq_cb.icp_hw_mgr_cb = cam_icp_hw_mgr_cb;
+	irq_cb.data = hw_mgr;
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc)
+		goto set_irq_failed;
+
+	fw_buf_info.kva = icp_hw_mgr.hfi_mem.fw_buf.kva;
+	fw_buf_info.iova = icp_hw_mgr.hfi_mem.fw_buf.iova;
+	fw_buf_info.len = icp_hw_mgr.hfi_mem.fw_buf.len;
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_SET_FW_BUF,
+		&fw_buf_info, sizeof(fw_buf_info));
+	if (rc)
+		goto set_irq_failed;
+
+	cam_hfi_enable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_CMD_FW_DOWNLOAD,
+		NULL, 0);
+	if (rc)
+		goto fw_download_failed;
+
+	return rc;
+fw_download_failed:
+	cam_hfi_disable_cpu(a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+set_irq_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_hfi_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct hfi_mem_info hfi_mem;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+
+	hfi_mem.qtbl.kva = icp_hw_mgr.hfi_mem.qtbl.kva;
+	hfi_mem.qtbl.iova = icp_hw_mgr.hfi_mem.qtbl.iova;
+	hfi_mem.qtbl.len = icp_hw_mgr.hfi_mem.qtbl.len;
+
+	hfi_mem.cmd_q.kva = icp_hw_mgr.hfi_mem.cmd_q.kva;
+	hfi_mem.cmd_q.iova = icp_hw_mgr.hfi_mem.cmd_q.iova;
+	hfi_mem.cmd_q.len = icp_hw_mgr.hfi_mem.cmd_q.len;
+
+	hfi_mem.msg_q.kva = icp_hw_mgr.hfi_mem.msg_q.kva;
+	hfi_mem.msg_q.iova = icp_hw_mgr.hfi_mem.msg_q.iova;
+	hfi_mem.msg_q.len = icp_hw_mgr.hfi_mem.msg_q.len;
+
+	hfi_mem.dbg_q.kva = icp_hw_mgr.hfi_mem.dbg_q.kva;
+	hfi_mem.dbg_q.iova = icp_hw_mgr.hfi_mem.dbg_q.iova;
+	hfi_mem.dbg_q.len = icp_hw_mgr.hfi_mem.dbg_q.len;
+
+	hfi_mem.sfr_buf.kva = icp_hw_mgr.hfi_mem.sfr_buf.kva;
+	hfi_mem.sfr_buf.iova = icp_hw_mgr.hfi_mem.sfr_buf.iova;
+	hfi_mem.sfr_buf.len = icp_hw_mgr.hfi_mem.sfr_buf.len;
+
+	hfi_mem.sec_heap.kva = icp_hw_mgr.hfi_mem.sec_heap.kva;
+	hfi_mem.sec_heap.iova = icp_hw_mgr.hfi_mem.sec_heap.iova;
+	hfi_mem.sec_heap.len = icp_hw_mgr.hfi_mem.sec_heap.len;
+
+	hfi_mem.shmem.iova = icp_hw_mgr.hfi_mem.shmem.iova_start;
+	hfi_mem.shmem.len = icp_hw_mgr.hfi_mem.shmem.iova_len;
+
+	hfi_mem.qdss.iova = icp_hw_mgr.hfi_mem.qdss_buf.iova;
+	hfi_mem.qdss.len = icp_hw_mgr.hfi_mem.qdss_buf.len;
+
+	return cam_hfi_init(0, &hfi_mem,
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base,
+		hw_mgr->a5_jtag_debug);
+}
+
+static int cam_icp_mgr_send_fw_init(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+
+	reinit_completion(&hw_mgr->a5_complete);
+	CAM_DBG(CAM_ICP, "Sending HFI init command");
+	rc = a5_dev_intf->hw_ops.process_cmd(
+		a5_dev_intf->hw_priv,
+		CAM_ICP_A5_SEND_INIT,
+		NULL, 0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&icp_hw_mgr.a5_complete,
+		msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_hfi_queue_dump();
+	}
+	CAM_DBG(CAM_ICP, "Done Waiting for INIT DONE Message");
+
+	return rc;
+}
+
+static int cam_icp_mgr_hw_open_u(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_icp_mgr_hw_open(hw_mgr, download_fw_args);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_icp_mgr_hw_open_k(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "Null hw mgr");
+		return 0;
+	}
+
+	return cam_icp_mgr_hw_open(hw_mgr, download_fw_args);
+}
+
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr)
+{
+	int rc = 0;
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	bool download_from_resume = true;
+
+	CAM_DBG(CAM_ICP, "Enter");
+	a5_dev_intf = hw_mgr->devices[CAM_ICP_DEV_A5][0];
+
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5 dev intf is wrong");
+		return -EINVAL;
+	}
+
+	if (!hw_mgr->fw_download) {
+		CAM_DBG(CAM_ICP, "Downloading FW");
+		rc = cam_icp_mgr_hw_open_k(hw_mgr, &download_from_resume);
+		CAM_DBG(CAM_ICP, "FW Download Done Exit");
+		return rc;
+	}
+
+	rc = a5_dev_intf->hw_ops.init(a5_dev_intf->hw_priv, NULL, 0);
+	if (rc)
+		return -EINVAL;
+
+	rc = cam_icp_mgr_hfi_resume(hw_mgr);
+	if (rc)
+		goto hfi_resume_failed;
+
+	CAM_DBG(CAM_ICP, "Exit");
+	return rc;
+hfi_resume_failed:
+	cam_icp_mgr_icp_power_collapse(hw_mgr);
+	return rc;
+}
+
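+/*
+ * Bring up the ICP from cold: allocate HFI memory, initialize the
+ * devices, download firmware, initialize HFI, and send the FW init
+ * command. On the boot-up path the devices are power-collapsed again
+ * once firmware is ready.
+ */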
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args)
+{
+	struct cam_hw_intf *a5_dev_intf = NULL;
+	struct cam_hw_info *a5_dev = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	bool icp_pc = false;
+	int rc = 0;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ICP, "hw_mgr is NULL");
+		return -EINVAL;
+	}
+
+	if (hw_mgr->fw_download) {
+		CAM_DBG(CAM_ICP, "FW already downloaded");
+		return rc;
+	}
+
+	a5_dev_intf = hw_mgr->a5_dev_intf;
+	if (!a5_dev_intf) {
+		CAM_ERR(CAM_ICP, "a5_dev_intf is invalid");
+		return -EINVAL;
+	}
+	a5_dev = (struct cam_hw_info *)a5_dev_intf->hw_priv;
+	rc = cam_icp_allocate_hfi_mem();
+	if (rc)
+		goto alloc_hfi_mem_failed;
+
+	rc = cam_icp_mgr_device_init(hw_mgr);
+	if (rc)
+		goto dev_init_fail;
+
+	rc = cam_icp_mgr_fw_download(hw_mgr);
+	if (rc)
+		goto fw_download_failed;
+
+	rc = cam_icp_mgr_hfi_init(hw_mgr);
+	if (rc)
+		goto hfi_init_failed;
+
+	rc = cam_icp_mgr_send_fw_init(hw_mgr);
+	if (rc)
+		goto fw_init_failed;
+
+	hw_mgr->ctxt_cnt = 0;
+	hw_mgr->fw_download = true;
+	atomic_set(&hw_mgr->recovery, 0);
+
+	CAM_INFO(CAM_ICP, "FW download done successfully");
+
+	rc = cam_ipe_bps_deint(hw_mgr);
+	if (download_fw_args)
+		icp_pc = *((bool *)download_fw_args);
+
+	if (download_fw_args && icp_pc && hw_mgr->icp_pc_flag) {
+		rc = cam_ipe_bps_deint(hw_mgr);
+		CAM_DBG(CAM_ICP, "deinit all clocks");
+	}
+
+	if (download_fw_args && icp_pc)
+		return rc;
+
+	rc = cam_ipe_bps_deint(hw_mgr);
+	rc = cam_icp_mgr_icp_power_collapse(hw_mgr);
+	CAM_DBG(CAM_ICP, "deinit all clocks at boot up");
+
+	return rc;
+
+fw_init_failed:
+	cam_hfi_deinit(
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+hfi_init_failed:
+	cam_hfi_disable_cpu(
+		a5_dev->soc_info.reg_map[A5_SIERRA_BASE].mem_base);
+fw_download_failed:
+	cam_icp_mgr_device_deinit(hw_mgr);
+dev_init_fail:
+	cam_icp_free_hfi_mem();
+alloc_hfi_mem_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_handle_config_err(
+	struct cam_hw_config_args *config_args,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	int idx)
+{
+	struct cam_hw_done_event_data buf_data;
+
+	buf_data.request_id = *(uint64_t *)config_args->priv;
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, false, &buf_data);
+
+	ctx_data->hfi_frame_process.request_id[idx] = 0;
+	ctx_data->hfi_frame_process.fw_process_flag[idx] = false;
+	clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+
+	return 0;
+}
+
+static int cam_icp_mgr_enqueue_config(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_hw_config_args *config_args)
+{
+	int rc = 0;
+	uint64_t request_id = 0;
+	struct crm_workq_task *task;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async *hfi_cmd;
+	struct cam_hw_update_entry *hw_update_entries;
+	struct icp_frame_info *frame_info = NULL;
+
+	frame_info = (struct icp_frame_info *)config_args->priv;
+	request_id = frame_info->request_id;
+	hw_update_entries = config_args->hw_update_entries;
+	CAM_DBG(CAM_ICP, "req_id = %lld %pK", request_id, config_args->priv);
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)hw_update_entries->addr;
+	hfi_cmd = (struct hfi_cmd_ipebps_async *)hw_update_entries->addr;
+	task_data->request_id = request_id;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int cam_icp_mgr_send_config_io(struct cam_icp_hw_ctx_data *ctx_data,
+	uint32_t io_buf_addr)
+{
+	int rc = 0;
+	struct hfi_cmd_work_data *task_data;
+	struct hfi_cmd_ipebps_async ioconfig_cmd;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	uint32_t size_in_words;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	ioconfig_cmd.size = sizeof(struct hfi_cmd_ipebps_async);
+	ioconfig_cmd.pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+	else
+		ioconfig_cmd.opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+	reinit_completion(&ctx_data->wait_complete);
+
+	ioconfig_cmd.num_fw_handles = 1;
+	ioconfig_cmd.fw_handles[0] = ctx_data->fw_handle;
+	ioconfig_cmd.payload.indirect = io_buf_addr;
+	ioconfig_cmd.user_data1 = PTR_TO_U64(ctx_data);
+	ioconfig_cmd.user_data2 = (uint64_t)0x0;
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&ioconfig_cmd;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	size_in_words = (*(uint32_t *)task_data->data) >> 2;
+	CAM_DBG(CAM_ICP, "size_in_words %u", size_in_words);
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+		msecs_to_jiffies((timeout)));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_hfi_queue_dump();
+	}
+
+	return rc;
+}
+
+static int cam_icp_mgr_send_recfg_io(struct cam_icp_hw_ctx_data *ctx_data,
+	struct hfi_cmd_ipebps_async *ioconfig_cmd, uint64_t req_id)
+{
+	int rc = 0;
+	struct hfi_cmd_work_data *task_data;
+	struct crm_workq_task *task;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)ioconfig_cmd;
+	task_data->request_id = req_id;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+static int cam_icp_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+	int rc = 0;
+	int idx;
+	uint64_t req_id;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *config_args = config_hw_args;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct icp_frame_info *frame_info = NULL;
+
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_ICP, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_ICP, "No hw update entries are available");
+		return -EINVAL;
+	}
+
+	ctx_data = config_args->ctxt_to_hw_map;
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_ICP, "ctx id :%u is not in use",
+			ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	frame_info = (struct icp_frame_info *)config_args->priv;
+	req_id = frame_info->request_id;
+	idx = cam_icp_clk_idx_from_req_id(ctx_data, req_id);
+	cam_icp_mgr_ipe_bps_clk_update(hw_mgr, ctx_data, idx);
+	ctx_data->hfi_frame_process.fw_process_flag[idx] = true;
+
+	CAM_DBG(CAM_ICP, "req_id %llu, io config %llu", req_id,
+		frame_info->io_config);
+
+	if (frame_info->io_config != 0) {
+		CAM_INFO(CAM_ICP, "Send recfg io");
+		rc = cam_icp_mgr_send_recfg_io(ctx_data,
+			&frame_info->hfi_cfg_io_cmd, req_id);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Fail to send reconfig io cmd");
+	}
+
+	rc = cam_icp_mgr_enqueue_config(hw_mgr, config_args);
+	if (rc)
+		goto config_err;
+	CAM_DBG(CAM_REQ,
+		"req_id = %lld on ctx_id %u for dev %d queued to FW",
+		req_id, ctx_data->ctx_id,
+		ctx_data->icp_dev_acquire_info->dev_type);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return 0;
+config_err:
+	cam_icp_mgr_handle_config_err(config_args, ctx_data, idx);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_prepare_frame_process_cmd(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct hfi_cmd_ipebps_async *hfi_cmd,
+	uint64_t request_id,
+	uint32_t fw_cmd_buf_iova_addr)
+{
+	hfi_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+	hfi_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_FRAME_PROCESS;
+	else
+		hfi_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_FRAME_PROCESS;
+	hfi_cmd->num_fw_handles = 1;
+	hfi_cmd->fw_handles[0] = ctx_data->fw_handle;
+	hfi_cmd->payload.indirect = fw_cmd_buf_iova_addr;
+	hfi_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	hfi_cmd->user_data2 = request_id;
+
+	CAM_DBG(CAM_ICP, "ctx_data : %pK, request_id :%lld cmd_buf %x",
+		(void *)ctx_data->context_priv, request_id,
+		fw_cmd_buf_iova_addr);
+
+	return 0;
+}
+
+static int cam_icp_mgr_pkt_validation(struct cam_packet *packet)
+{
+	if (((packet->header.op_code & 0xff) !=
+		CAM_ICP_OPCODE_IPE_UPDATE) &&
+		((packet->header.op_code & 0xff) !=
+		CAM_ICP_OPCODE_BPS_UPDATE)) {
+		CAM_ERR(CAM_ICP, "Invalid Opcode in pkt: %d",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	if (packet->num_io_configs > IPE_IO_IMAGES_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid number of io configs: %d %d",
+			IPE_IO_IMAGES_MAX, packet->num_io_configs);
+		return -EINVAL;
+	}
+
+	if (packet->num_cmd_buf > CAM_ICP_CTX_MAX_CMD_BUFFERS) {
+		CAM_ERR(CAM_ICP, "Invalid number of cmd buffers: %d %d",
+			CAM_ICP_CTX_MAX_CMD_BUFFERS, packet->num_cmd_buf);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "number of cmd/patch info: %u %u %u %u",
+			packet->num_cmd_buf,
+			packet->num_io_configs, IPE_IO_IMAGES_MAX,
+			packet->num_patches);
+	return 0;
+}
+
+static int cam_icp_mgr_process_cmd_desc(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_packet *packet, struct cam_icp_hw_ctx_data *ctx_data,
+	uint32_t *fw_cmd_buf_iova_addr)
+{
+	int rc = 0;
+	int i, j, k;
+	uint64_t addr;
+	size_t len;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uintptr_t cpu_addr = 0;
+	struct ipe_frame_process_data *frame_process_data = NULL;
+	struct bps_frame_process_data *bps_frame_process_data = NULL;
+	struct frame_set *ipe_set = NULL;
+	struct frame_buffer *bps_bufs = NULL;
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *) &packet->payload + packet->cmd_buf_offset/4);
+
+	*fw_cmd_buf_iova_addr = 0;
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (cmd_desc[i].type == CAM_CMD_BUF_FW) {
+			rc = cam_mem_get_io_buf(cmd_desc[i].mem_handle,
+				hw_mgr->iommu_hdl, &addr, &len);
+			if (rc) {
+				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
+					hw_mgr->iommu_hdl);
+				return rc;
+			}
+			*fw_cmd_buf_iova_addr = addr;
+			*fw_cmd_buf_iova_addr =
+				(*fw_cmd_buf_iova_addr + cmd_desc[i].offset);
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				&cpu_addr, &len);
+			if (rc) {
+				CAM_ERR(CAM_ICP, "get cmd buf failed %x",
+					hw_mgr->iommu_hdl);
+				*fw_cmd_buf_iova_addr = 0;
+				return rc;
+			}
+			cpu_addr = cpu_addr + cmd_desc[i].offset;
+		}
+	}
+
+	if (!cpu_addr) {
+		CAM_ERR(CAM_ICP, "Invalid cpu addr");
+		return -EINVAL;
+	}
+
+	if (ctx_data->icp_dev_acquire_info->dev_type !=
+		CAM_ICP_RES_TYPE_BPS) {
+		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
+		frame_process_data = (struct ipe_frame_process_data *)cpu_addr;
+		CAM_DBG(CAM_ICP, "%u %u %u", frame_process_data->max_num_cores,
+			frame_process_data->target_time,
+			frame_process_data->frames_in_batch);
+		frame_process_data->strip_lib_out_addr = 0;
+		frame_process_data->iq_settings_addr = 0;
+		frame_process_data->scratch_buffer_addr = 0;
+		frame_process_data->ubwc_stats_buffer_addr = 0;
+		frame_process_data->cdm_buffer_addr = 0;
+		frame_process_data->cdm_prog_base = 0;
+		for (i = 0; i < frame_process_data->frames_in_batch; i++) {
+			ipe_set = &frame_process_data->framesets[i];
+			for (j = 0; j < IPE_IO_IMAGES_MAX; j++) {
+				for (k = 0; k < MAX_NUM_OF_IMAGE_PLANES; k++) {
+					ipe_set->buffers[j].buf_ptr[k] = 0;
+					ipe_set->buffers[j].meta_buf_ptr[k] = 0;
+				}
+			}
+		}
+	} else {
+		CAM_DBG(CAM_ICP, "cpu addr = %zx", cpu_addr);
+		bps_frame_process_data =
+			(struct bps_frame_process_data *)cpu_addr;
+		CAM_DBG(CAM_ICP, "%u %u",
+			bps_frame_process_data->max_num_cores,
+			bps_frame_process_data->target_time);
+		bps_frame_process_data->ubwc_stats_buffer_addr = 0;
+		bps_frame_process_data->cdm_buffer_addr = 0;
+		bps_frame_process_data->iq_settings_addr = 0;
+		bps_frame_process_data->strip_lib_out_addr = 0;
+		bps_frame_process_data->cdm_prog_addr = 0;
+		for (i = 0; i < BPS_IO_IMAGES_MAX; i++) {
+			bps_bufs = &bps_frame_process_data->buffers[i];
+			for (j = 0; j < MAX_NUM_OF_IMAGE_PLANES; j++) {
+				bps_bufs->buf_ptr[j] = 0;
+				bps_bufs->meta_buf_ptr[j] = 0;
+			}
+		}
+	}
+
+	return rc;
+}
+
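+/*
+ * Walk the packet's IO configs and split fences into input and output
+ * map entries; multiple input fences are merged into a single sync
+ * object, which is recorded in the frame table for later cleanup.
+ */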
+static int cam_icp_mgr_process_io_cfg(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args,
+	int32_t index)
+{
+	int i, j, k, rc = 0;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+	int32_t sync_in_obj[CAM_MAX_IN_RES];
+	int32_t merged_sync_in_obj;
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *) ((uint32_t *) &packet->payload +
+				packet->io_configs_offset/4);
+	prepare_args->num_out_map_entries = 0;
+	prepare_args->num_in_map_entries = 0;
+
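+	/*
+	 * Split the io configs by direction: input fences are collected
+	 * for merging, output fences map straight into the out map
+	 * entries.
+	 */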
+	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			if (j >= CAM_MAX_IN_RES) {
+				CAM_ERR(CAM_ICP, "too many input fences: %u",
+					packet->num_io_configs);
+				return -EINVAL;
+			}
+			sync_in_obj[j++] = io_cfg_ptr[i].fence;
+			prepare_args->num_in_map_entries++;
+		} else {
+			prepare_args->out_map_entries[k++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_out_map_entries++;
+		}
+		CAM_DBG(CAM_REQ,
+			"ctx_id: %u req_id: %llu dir[%d]: %u, fence: %u resource_type = %u memh %x",
+			ctx_data->ctx_id, packet->header.request_id, i,
+			io_cfg_ptr[i].direction, io_cfg_ptr[i].fence,
+			io_cfg_ptr[i].resource_type,
+			io_cfg_ptr[i].mem_handle[0]);
+	}
+
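+	/*
+	 * Drop duplicate input fences first; if more than one distinct
+	 * fence remains, merge them into a single sync object so the
+	 * request waits on exactly one input.
+	 */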
+	if (prepare_args->num_in_map_entries > 1)
+		prepare_args->num_in_map_entries =
+			cam_common_util_remove_duplicate_arr(
+			sync_in_obj, prepare_args->num_in_map_entries);
+
+	if (prepare_args->num_in_map_entries > 1) {
+		rc = cam_sync_merge(&sync_in_obj[0],
+			prepare_args->num_in_map_entries, &merged_sync_in_obj);
+		if (rc) {
+			prepare_args->num_out_map_entries = 0;
+			prepare_args->num_in_map_entries = 0;
+			return rc;
+		}
+
+		ctx_data->hfi_frame_process.in_resource[index] =
+			merged_sync_in_obj;
+		prepare_args->in_map_entries[0].sync_id = merged_sync_in_obj;
+		prepare_args->num_in_map_entries = 1;
+		CAM_DBG(CAM_REQ, "ctx_id: %u req_id: %llu Merged Sync obj: %d",
+			ctx_data->ctx_id, packet->header.request_id,
+			merged_sync_in_obj);
+	} else if (prepare_args->num_in_map_entries == 1) {
+		prepare_args->in_map_entries[0].sync_id = sync_in_obj[0];
+		prepare_args->num_in_map_entries = 1;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+	} else {
+		CAM_ERR(CAM_ICP, "No input fences");
+		prepare_args->num_in_map_entries = 0;
+		ctx_data->hfi_frame_process.in_resource[index] = 0;
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_icp_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	struct cam_icp_clk_bw_request *soc_req;
+	struct cam_icp_clk_bw_request *clk_info;
+	struct icp_cmd_generic_blob *blob;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	uint32_t index;
+	size_t io_buf_size;
+	int rc = 0;
+	uintptr_t resource;
+
+	if (!blob_data || (blob_size == 0)) {
+		CAM_ERR(CAM_ICP, "Invalid blob info %pK %d", blob_data,
+			blob_size);
+		return -EINVAL;
+	}
+
+	blob = (struct icp_cmd_generic_blob *)user_data;
+	ctx_data = blob->ctx;
+	index = blob->frame_info_idx;
+
+	switch (blob_type) {
+	case CAM_ICP_CMD_GENERIC_BLOB_CLK:
+		if (blob_size != sizeof(struct cam_icp_clk_bw_request)) {
+			rc = -EINVAL;
+			break;
+		}
+		clk_info = &ctx_data->hfi_frame_process.clk_info[index];
+		memset(clk_info, 0, sizeof(struct cam_icp_clk_bw_request));
+
+		soc_req = (struct cam_icp_clk_bw_request *)blob_data;
+		*clk_info = *soc_req;
+		CAM_DBG(CAM_ICP, "%llu %llu %d %d %d",
+			clk_info->budget_ns, clk_info->frame_cycles,
+			clk_info->rt_flag, clk_info->uncompressed_bw,
+			clk_info->compressed_bw);
+		break;
+
+	case CAM_ICP_CMD_GENERIC_BLOB_CFG_IO:
+		CAM_DBG(CAM_ICP, "CAM_ICP_CMD_GENERIC_BLOB_CFG_IO");
+		if (blob_size < sizeof(uint32_t)) {
+			CAM_ERR(CAM_ICP, "Invalid blob size %u", blob_size);
+			rc = -EINVAL;
+			break;
+		}
+		resource = *((uint32_t *)blob_data);
+		if (copy_from_user(&ctx_data->icp_dev_io_info,
+			(void __user *)resource,
+			sizeof(struct cam_icp_acquire_dev_info))) {
+			CAM_ERR(CAM_ICP, "Failed in copy from user");
+			return -EFAULT;
+		}
+		CAM_DBG(CAM_ICP, "buf handle %d",
+			ctx_data->icp_dev_io_info.io_config_cmd_handle);
+		rc = cam_mem_get_io_buf(
+			ctx_data->icp_dev_io_info.io_config_cmd_handle,
+			icp_hw_mgr.iommu_hdl,
+			blob->io_buf_addr, &io_buf_size);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed in blob update");
+		else
+			CAM_DBG(CAM_ICP, "io buf addr %llu",
+				*blob->io_buf_addr);
+		break;
+
+	default:
+		CAM_WARN(CAM_ICP, "Invalid blob type %d", blob_type);
+		break;
+	}
+	return rc;
+}
+
+static int cam_icp_process_generic_cmd_buffer(
+	struct cam_packet *packet,
+	struct cam_icp_hw_ctx_data *ctx_data,
+	int32_t index,
+	uint64_t *io_buf_addr)
+{
+	int i, rc = 0;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct icp_cmd_generic_blob cmd_generic_blob;
+
+	cmd_generic_blob.ctx = ctx_data;
+	cmd_generic_blob.frame_info_idx = index;
+	cmd_generic_blob.io_buf_addr = io_buf_addr;
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&packet->payload + packet->cmd_buf_offset / 4);
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if (cmd_desc[i].meta_data != CAM_ICP_CMD_META_GENERIC_BLOB)
+			continue;
+
+		rc = cam_packet_util_process_generic_cmd_buffer(&cmd_desc[i],
+			cam_icp_packet_generic_blob_handler, &cmd_generic_blob);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed in processing blobs %d", rc);
+	}
+
+	return rc;
+}
+
+static int cam_icp_mgr_process_cfg_io_cmd(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct hfi_cmd_ipebps_async *ioconfig_cmd,
+	uint64_t request_id,
+	uint64_t io_config)
+{
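+	/*
+	 * Build an indirect HFI async command: firmware dereferences the
+	 * IO config from the IOVA in payload.indirect rather than taking
+	 * it inline.
+	 */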
+	ioconfig_cmd->size = sizeof(struct hfi_cmd_ipebps_async);
+	ioconfig_cmd->pkt_type = HFI_CMD_IPEBPS_ASYNC_COMMAND_INDIRECT;
+	if (ctx_data->icp_dev_acquire_info->dev_type == CAM_ICP_RES_TYPE_BPS)
+		ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_BPS_CONFIG_IO;
+	else
+		ioconfig_cmd->opcode = HFI_IPEBPS_CMD_OPCODE_IPE_CONFIG_IO;
+
+	ioconfig_cmd->num_fw_handles = 1;
+	ioconfig_cmd->fw_handles[0] = ctx_data->fw_handle;
+	ioconfig_cmd->payload.indirect = io_config;
+	ioconfig_cmd->user_data1 = PTR_TO_U64(ctx_data);
+	ioconfig_cmd->user_data2 = request_id;
+
+	return 0;
+}
+
+static int cam_icp_mgr_update_hfi_frame_process(
+	struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_packet *packet,
+	struct cam_hw_prepare_update_args *prepare_args,
+	int32_t *idx)
+{
+	int32_t index, rc;
+	struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+
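+	/* Reserve a free slot in the frame process bitmap for this request */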
+	index = find_first_zero_bit(ctx_data->hfi_frame_process.bitmap,
+		ctx_data->hfi_frame_process.bits);
+	if (index < 0 || index >= CAM_FRAME_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "request idx is wrong: %d", index);
+		return -EINVAL;
+	}
+	set_bit(index, ctx_data->hfi_frame_process.bitmap);
+
+	ctx_data->hfi_frame_process.request_id[index] =
+		packet->header.request_id;
+	ctx_data->hfi_frame_process.frame_info[index].request_id =
+		packet->header.request_id;
+	ctx_data->hfi_frame_process.frame_info[index].io_config = 0;
+	rc = cam_icp_process_generic_cmd_buffer(packet, ctx_data, index,
+		&ctx_data->hfi_frame_process.frame_info[index].io_config);
+	if (rc) {
+		clear_bit(index, ctx_data->hfi_frame_process.bitmap);
+		ctx_data->hfi_frame_process.request_id[index] = -1;
+		return rc;
+	}
+
+	if (ctx_data->hfi_frame_process.frame_info[index].io_config) {
+		hfi_cmd = (struct hfi_cmd_ipebps_async *)
+			&ctx_data->hfi_frame_process.frame_info[index].hfi_cfg_io_cmd;
+		rc = cam_icp_mgr_process_cfg_io_cmd(ctx_data, hfi_cmd,
+			packet->header.request_id,
+			ctx_data->hfi_frame_process.frame_info[index].io_config);
+	}
+	*idx = index;
+
+	return rc;
+}
+
+static void cam_icp_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	uint64_t   iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
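+	/*
+	 * Walk every plane of every io config and flag the buffer whose
+	 * fd matches the one reported in the page fault info.
+	 */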
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_ICP,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_ICP, "port: %d f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_ICP, "get src buf address failed");
+				continue;
+			}
+			if (iova_addr >> 32) {
+				CAM_ERR(CAM_ICP, "Invalid mapped address");
+				rc = -EINVAL;
+				continue;
+			}
+
+			CAM_INFO(CAM_ICP,
+				"pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+
+			iova_addr += io_cfg[i].offsets[j];
+
+		}
+	}
+}
+
+static int cam_icp_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int        rc = 0;
+	int32_t    idx;
+	uint32_t   fw_cmd_buf_iova_addr;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct hfi_cmd_ipebps_async *hfi_cmd = NULL;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+
+	if ((!prepare_args) || (!hw_mgr) || (!prepare_args->packet)) {
+		CAM_ERR(CAM_ICP, "Invalid args");
+		return -EINVAL;
+	}
+
+	ctx_data = prepare_args->ctxt_to_hw_map;
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (ctx_data->state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		CAM_ERR(CAM_ICP, "ctx id: %u is not in use",
+			ctx_data->ctx_id);
+		return -EINVAL;
+	}
+
+	packet = prepare_args->packet;
+
+	rc = cam_icp_mgr_pkt_validation(packet);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_process_cmd_desc(hw_mgr, packet,
+		ctx_data, &fw_cmd_buf_iova_addr);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	prepare_args->pf_data->packet = packet;
+
+	CAM_DBG(CAM_REQ, "req id = %lld for ctx = %u",
+		packet->header.request_id, ctx_data->ctx_id);
+	/* Update Buffer Address from handles and patch information */
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl,
+		hw_mgr->iommu_sec_hdl);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_update_hfi_frame_process(ctx_data, packet,
+		prepare_args, &idx);
+	if (rc) {
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	rc = cam_icp_mgr_process_io_cfg(hw_mgr, ctx_data,
+		packet, prepare_args, idx);
+	if (rc) {
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0)
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[idx]);
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		ctx_data->hfi_frame_process.request_id[idx] = -1;
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return rc;
+	}
+
+	hfi_cmd = (struct hfi_cmd_ipebps_async *)
+			&ctx_data->hfi_frame_process.hfi_frame_cmd[idx];
+	cam_icp_mgr_prepare_frame_process_cmd(
+		ctx_data, hfi_cmd, packet->header.request_id,
+		fw_cmd_buf_iova_addr);
+
+	prepare_args->num_hw_update_entries = 1;
+	prepare_args->hw_update_entries[0].addr = (uintptr_t)hfi_cmd;
+	prepare_args->priv = &ctx_data->hfi_frame_process.frame_info[idx];
+
+	CAM_DBG(CAM_ICP, "X: req id = %lld ctx_id = %u",
+		packet->header.request_id, ctx_data->ctx_id);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_send_abort_status(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, true,
+			&hfi_frame_process->request_id[idx]);
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			CAM_DBG(CAM_ICP, "Delete merged sync in object: %d",
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			cam_sync_destroy(
+				ctx_data->hfi_frame_process.in_resource[idx]);
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+	}
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return 0;
+}
+
+static int cam_icp_mgr_delete_sync(void *priv, void *data)
+{
+	struct hfi_cmd_work_data *task_data = NULL;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_ICP, "Invalid params%pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)data;
+	ctx_data = task_data->data;
+
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Null Context");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->in_free_resource[idx])
+			continue;
+		/*
+		 * cam_sync_destroy(
+		 *	ctx_data->hfi_frame_process.in_free_resource[idx]);
+		 */
+		ctx_data->hfi_frame_process.in_free_resource[idx] = 0;
+	}
+	mutex_unlock(&ctx_data->ctx_mutex);
+	return 0;
+}
+
+static int cam_icp_mgr_delete_sync_obj(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int rc = 0;
+	struct crm_workq_task *task;
+	struct hfi_cmd_work_data *task_data;
+
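+	/*
+	 * Defer the sync object cleanup to the command workqueue;
+	 * cam_icp_mgr_delete_sync takes the ctx mutex, which the flush
+	 * path is still holding when this is queued.
+	 */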
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "no empty task");
+		return -ENOMEM;
+	}
+
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)ctx_data;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_delete_sync;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int cam_icp_mgr_flush_all(struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+	bool clear_in_resource = false;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			ctx_data->hfi_frame_process.in_free_resource[idx] =
+				ctx_data->hfi_frame_process.in_resource[idx];
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		clear_in_resource = true;
+	}
+
+	if (clear_in_resource)
+		cam_icp_mgr_delete_sync_obj(ctx_data);
+
+	return 0;
+}
+
+static int cam_icp_mgr_flush_req(struct cam_icp_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	int64_t request_id;
+	struct hfi_frame_process_info *hfi_frame_process;
+	int idx;
+	bool clear_in_resource = false;
+
+	hfi_frame_process = &ctx_data->hfi_frame_process;
+	request_id = *(int64_t *)flush_args->flush_req_pending[0];
+	for (idx = 0; idx < CAM_FRAME_CMD_MAX; idx++) {
+		if (!hfi_frame_process->request_id[idx])
+			continue;
+
+		if (hfi_frame_process->request_id[idx] != request_id)
+			continue;
+
+		/* now release memory for hfi frame process command */
+		hfi_frame_process->request_id[idx] = 0;
+		if (ctx_data->hfi_frame_process.in_resource[idx] > 0) {
+			ctx_data->hfi_frame_process.in_free_resource[idx] =
+				ctx_data->hfi_frame_process.in_resource[idx];
+			ctx_data->hfi_frame_process.in_resource[idx] = 0;
+		}
+		clear_bit(idx, ctx_data->hfi_frame_process.bitmap);
+		clear_in_resource = true;
+	}
+
+	if (clear_in_resource)
+		cam_icp_mgr_delete_sync_obj(ctx_data);
+
+	return 0;
+}
+
+static int cam_icp_mgr_hw_flush(void *hw_priv, void *hw_flush_args)
+{
+	struct cam_hw_flush_args *flush_args = hw_flush_args;
+	struct cam_icp_hw_ctx_data *ctx_data;
+	struct cam_icp_hw_mgr *hw_mgr = hw_priv;
+
+	if ((!hw_priv) || (!hw_flush_args)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK",
+			hw_priv, hw_flush_args);
+		return -EINVAL;
+	}
+
+	ctx_data = flush_args->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "Ctx data is NULL");
+		return -EINVAL;
+	}
+
+	if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+		(flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+		CAM_ERR(CAM_ICP, "Invalid lush type: %d",
+			flush_args->flush_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, "ctx_id %d Flush type %d",
+		ctx_data->ctx_id, flush_args->flush_type);
+
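+	/*
+	 * FLUSH_ALL aborts in-flight work on firmware first (skipped when
+	 * recovering from an FW error), then drops every queued frame.
+	 * FLUSH_REQ only supports flushing pending requests.
+	 */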
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		mutex_lock(&hw_mgr->hw_mgr_mutex);
+		if (!atomic_read(&hw_mgr->recovery)
+			&& flush_args->num_req_active) {
+			mutex_unlock(&hw_mgr->hw_mgr_mutex);
+			cam_icp_mgr_abort_handle(ctx_data);
+		} else {
+			mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		}
+		mutex_lock(&ctx_data->ctx_mutex);
+		cam_icp_mgr_flush_all(ctx_data, flush_args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	case CAM_FLUSH_TYPE_REQ:
+		mutex_lock(&ctx_data->ctx_mutex);
+		if (flush_args->num_req_active) {
+			CAM_ERR(CAM_ICP, "Flush request is not supported");
+			mutex_unlock(&ctx_data->ctx_mutex);
+			return -EINVAL;
+		}
+		if (flush_args->num_req_pending)
+			cam_icp_mgr_flush_req(ctx_data, flush_args);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid flush type: %d",
+			flush_args->flush_type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_icp_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+	int rc = 0;
+	int ctx_id = 0;
+	struct cam_hw_release_args *release_hw = release_hw_args;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+
+	if (!release_hw || !hw_mgr) {
+		CAM_ERR(CAM_ICP, "Invalid args: %pK %pK", release_hw, hw_mgr);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "Enter recovery set %d",
+		atomic_read(&hw_mgr->recovery));
+	ctx_data = release_hw->ctxt_to_hw_map;
+	if (!ctx_data) {
+		CAM_ERR(CAM_ICP, "NULL ctx data");
+		return -EINVAL;
+	}
+
+	ctx_id = ctx_data->ctx_id;
+	if (ctx_id < 0 || ctx_id >= CAM_ICP_CTX_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid ctx id: %d", ctx_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+	if (hw_mgr->ctx_data[ctx_id].state != CAM_ICP_CTX_STATE_ACQUIRED) {
+		CAM_DBG(CAM_ICP, "ctx is not in use: %d", ctx_id);
+		mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->ctx_data[ctx_id].ctx_mutex);
+
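+	/*
+	 * If firmware is healthy and requests are still active, abort
+	 * them on firmware and report abort status back before tearing
+	 * the context down.
+	 */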
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!atomic_read(&hw_mgr->recovery) && release_hw->active_req) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		cam_icp_mgr_abort_handle(ctx_data);
+		cam_icp_mgr_send_abort_status(ctx_data);
+	} else {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	rc = cam_icp_mgr_release_ctx(hw_mgr, ctx_id);
+	if (!hw_mgr->ctxt_cnt) {
+		CAM_DBG(CAM_ICP, "Last Release");
+		cam_icp_mgr_icp_power_collapse(hw_mgr);
+		cam_icp_hw_mgr_reset_clk_info(hw_mgr);
+		rc = cam_ipe_bps_deint(hw_mgr);
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	if (!hw_mgr->bps_ctxt_cnt || !hw_mgr->ipe_ctxt_cnt)
+		cam_icp_device_timer_stop(hw_mgr);
+
+	CAM_DBG(CAM_ICP, "Release done for ctx_id %d", ctx_id);
+	return rc;
+}
+
+static int cam_icp_mgr_create_handle(uint32_t dev_type,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_cmd_create_handle create_handle;
+	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	int rc = 0;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task)
+		return -ENOMEM;
+
+	create_handle.size = sizeof(struct hfi_cmd_create_handle);
+	create_handle.pkt_type = HFI_CMD_IPEBPS_CREATE_HANDLE;
+	create_handle.handle_type = dev_type;
+	create_handle.user_data1 = PTR_TO_U64(ctx_data);
+	reinit_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&create_handle;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
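+	/*
+	 * Firmware acks CREATE_HANDLE on the message queue; the message
+	 * handler records the new fw_handle and completes wait_complete.
+	 */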
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_hfi_queue_dump();
+	}
+
+	if (ctx_data->fw_handle == 0) {
+		CAM_ERR(CAM_ICP, "Invalid handle created");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_icp_mgr_send_ping(struct cam_icp_hw_ctx_data *ctx_data)
+{
+	struct hfi_cmd_ping_pkt ping_pkt;
+	struct hfi_cmd_work_data *task_data;
+	unsigned long rem_jiffies;
+	int timeout = 5000;
+	struct crm_workq_task *task;
+	int rc = 0;
+
+	task = cam_req_mgr_workq_get_task(icp_hw_mgr.cmd_work);
+	if (!task) {
+		CAM_ERR(CAM_ICP, "No free task to send ping command");
+		return -ENOMEM;
+	}
+
+	ping_pkt.size = sizeof(struct hfi_cmd_ping_pkt);
+	ping_pkt.pkt_type = HFI_CMD_SYS_PING;
+	ping_pkt.user_data = PTR_TO_U64(ctx_data);
+	init_completion(&ctx_data->wait_complete);
+	task_data = (struct hfi_cmd_work_data *)task->payload;
+	task_data->data = (void *)&ping_pkt;
+	task_data->request_id = 0;
+	task_data->type = ICP_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_icp_mgr_process_cmd;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &icp_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc)
+		return rc;
+
+	rem_jiffies = wait_for_completion_timeout(&ctx_data->wait_complete,
+			msecs_to_jiffies(timeout));
+	if (!rem_jiffies) {
+		rc = -ETIMEDOUT;
+		CAM_ERR(CAM_ICP, "FW response timed out %d", rc);
+		cam_hfi_queue_dump();
+	}
+
+	return rc;
+}
+
+static int cam_icp_get_acquire_info(struct cam_icp_hw_mgr *hw_mgr,
+	struct cam_hw_acquire_args *args,
+	struct cam_icp_hw_ctx_data *ctx_data)
+{
+	int i;
+	int acquire_size;
+	struct cam_icp_acquire_dev_info icp_dev_acquire_info;
+	struct cam_icp_res_info *p_icp_out = NULL;
+
+	if (copy_from_user(&icp_dev_acquire_info,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_icp_acquire_dev_info))) {
+		CAM_ERR(CAM_ICP, "Failed in acquire");
+		return -EFAULT;
+	}
+
+	if (icp_dev_acquire_info.secure_mode > CAM_SECURE_MODE_SECURE) {
+		CAM_ERR(CAM_ICP, "Invalid mode:%d",
+			icp_dev_acquire_info.secure_mode);
+		return -EINVAL;
+	}
+
+	if (icp_dev_acquire_info.num_out_res > ICP_MAX_OUTPUT_SUPPORTED) {
+		CAM_ERR(CAM_ICP, "num of out resources exceeding : %u",
+			icp_dev_acquire_info.num_out_res);
+		return -EINVAL;
+	}
+
+	if (icp_dev_acquire_info.dev_type >= CAM_ICP_RES_TYPE_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid device type: %d",
+			icp_dev_acquire_info.dev_type);
+		return -EINVAL;
+	}
+
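+	/*
+	 * struct cam_icp_acquire_dev_info already embeds one out_res
+	 * entry, hence num_out_res - 1 additional entries here.
+	 */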
+	acquire_size = sizeof(struct cam_icp_acquire_dev_info) +
+		((icp_dev_acquire_info.num_out_res - 1) *
+		sizeof(struct cam_icp_res_info));
+	ctx_data->icp_dev_acquire_info = kzalloc(acquire_size, GFP_KERNEL);
+	if (!ctx_data->icp_dev_acquire_info)
+		return -ENOMEM;
+
+	if (copy_from_user(ctx_data->icp_dev_acquire_info,
+		(void __user *)args->acquire_info, acquire_size)) {
+		CAM_ERR(CAM_ICP, "Failed in acquire: size = %d", acquire_size);
+		kfree(ctx_data->icp_dev_acquire_info);
+		ctx_data->icp_dev_acquire_info = NULL;
+		return -EFAULT;
+	}
+
+	CAM_DBG(CAM_ICP, "%x %x %x %x %x %x %x",
+		ctx_data->icp_dev_acquire_info->dev_type,
+		ctx_data->icp_dev_acquire_info->in_res.format,
+		ctx_data->icp_dev_acquire_info->in_res.width,
+		ctx_data->icp_dev_acquire_info->in_res.height,
+		ctx_data->icp_dev_acquire_info->in_res.fps,
+		ctx_data->icp_dev_acquire_info->num_out_res,
+		ctx_data->icp_dev_acquire_info->scratch_mem_size);
+
+	p_icp_out = ctx_data->icp_dev_acquire_info->out_res;
+	for (i = 0; i < icp_dev_acquire_info.num_out_res; i++)
+		CAM_DBG(CAM_ICP, "out[i] %x %x %x %x",
+			p_icp_out[i].format,
+			p_icp_out[i].width,
+			p_icp_out[i].height,
+			p_icp_out[i].fps);
+
+	return 0;
+}
+
+static const char *cam_icp_dev_type_to_name(
+	uint32_t dev_type)
+{
+	switch (dev_type) {
+	case CAM_ICP_RES_TYPE_BPS:
+		return "BPS";
+	case CAM_ICP_RES_TYPE_IPE_RT:
+		return "IPE_RT";
+	case CAM_ICP_RES_TYPE_IPE:
+		return "IPE";
+	default:
+		return "Invalid dev type";
+	}
+}
+
+static int cam_icp_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc = 0, bitmap_size = 0;
+	uint32_t ctx_id = 0;
+	uint64_t io_buf_addr;
+	size_t io_buf_size;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_icp_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
+
+	if ((!hw_mgr_priv) || (!acquire_hw_args)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_ICP, "number of resources are wrong: %u",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ICP, "ENTER");
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_icp_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_ICP_CTX_MAX) {
+		CAM_ERR(CAM_ICP, "No free ctx space in hw_mgr");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -ENOSPC;
+	}
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+	ctx_data->ctx_id = ctx_id;
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	rc = cam_icp_get_acquire_info(hw_mgr, args, ctx_data);
+	if (rc)
+		goto acquire_info_failed;
+
+	icp_dev_acquire_info = ctx_data->icp_dev_acquire_info;
+
+	CAM_DBG(CAM_ICP, "acquire io buf handle %d",
+		icp_dev_acquire_info->io_config_cmd_handle);
+	rc = cam_mem_get_io_buf(
+		icp_dev_acquire_info->io_config_cmd_handle,
+		hw_mgr->iommu_hdl,
+		&io_buf_addr, &io_buf_size);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to get src buf info from io desc");
+		goto get_io_buf_failed;
+	}
+
+	CAM_DBG(CAM_ICP, "hdl: %d, addr: %pK, size: %zu",
+		icp_dev_acquire_info->io_config_cmd_handle,
+		(void *)io_buf_addr, io_buf_size);
+
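+	/*
+	 * First context to acquire: bring the ICP processor out of power
+	 * collapse and push debug levels and the UBWC config to firmware
+	 * before any device resume.
+	 */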
+	if (!hw_mgr->ctxt_cnt) {
+		rc = cam_icp_clk_info_init(hw_mgr, ctx_data);
+		if (rc)
+			goto get_io_buf_failed;
+
+		rc = cam_icp_mgr_icp_resume(hw_mgr);
+		if (rc)
+			goto get_io_buf_failed;
+
+		if (icp_hw_mgr.a5_debug_type)
+			hfi_set_debug_level(icp_hw_mgr.a5_debug_type,
+				icp_hw_mgr.a5_dbg_lvl);
+
+		hfi_set_fw_dump_level(icp_hw_mgr.a5_fw_dump_lvl);
+
+		rc = cam_icp_send_ubwc_cfg(hw_mgr);
+		if (rc)
+			goto ubwc_cfg_failed;
+	}
+
+	rc = cam_icp_mgr_ipe_bps_resume(hw_mgr, ctx_data);
+	if (rc)
+		goto ipe_bps_resume_failed;
+
+	rc = cam_icp_mgr_send_ping(ctx_data);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "ping ack not received");
+		goto send_ping_failed;
+	}
+	CAM_DBG(CAM_ICP, "ping ack received");
+
+	rc = cam_icp_mgr_create_handle(icp_dev_acquire_info->dev_type,
+		ctx_data);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "create handle failed");
+		goto create_handle_failed;
+	}
+
+	rc = cam_icp_mgr_send_config_io(ctx_data, io_buf_addr);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "IO Config command failed");
+		goto ioconfig_failed;
+	}
+
+	ctx_data->context_priv = args->context_data;
+	args->ctxt_to_hw_map = ctx_data;
+
+	bitmap_size = BITS_TO_LONGS(CAM_FRAME_CMD_MAX) * sizeof(long);
+	ctx_data->hfi_frame_process.bitmap =
+			kzalloc(bitmap_size, GFP_KERNEL);
+	if (!ctx_data->hfi_frame_process.bitmap) {
+		rc = -ENOMEM;
+		goto ioconfig_failed;
+	}
+
+	ctx_data->hfi_frame_process.bits = bitmap_size * BITS_PER_BYTE;
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+	icp_dev_acquire_info->scratch_mem_size = ctx_data->scratch_mem_size;
+
+	if (copy_to_user((void __user *)args->acquire_info,
+		icp_dev_acquire_info,
+		sizeof(struct cam_icp_acquire_dev_info))) {
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+
+	cam_icp_ctx_clk_info_init(ctx_data);
+	ctx_data->state = CAM_ICP_CTX_STATE_ACQUIRED;
+	mutex_unlock(&ctx_data->ctx_mutex);
+	CAM_DBG(CAM_ICP, "scratch size = %x fw_handle = %x",
+			(unsigned int)icp_dev_acquire_info->scratch_mem_size,
+			(unsigned int)ctx_data->fw_handle);
+	/* Start device timer */
+	if (((hw_mgr->bps_ctxt_cnt == 1) || (hw_mgr->ipe_ctxt_cnt == 1)))
+		cam_icp_device_timer_start(hw_mgr);
+	/* Start context timer */
+	cam_icp_ctx_timer_start(ctx_data);
+	hw_mgr->ctxt_cnt++;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	CAM_DBG(CAM_ICP, "Acquire Done for ctx_id %u dev name %s dev type %d",
+		ctx_data->ctx_id, cam_icp_dev_type_to_name(
+		icp_dev_acquire_info->dev_type),
+		icp_dev_acquire_info->dev_type);
+
+	return 0;
+
+copy_to_user_failed:
+	kfree(ctx_data->hfi_frame_process.bitmap);
+	ctx_data->hfi_frame_process.bitmap = NULL;
+ioconfig_failed:
+	cam_icp_mgr_destroy_handle(ctx_data);
+create_handle_failed:
+send_ping_failed:
+	cam_icp_mgr_ipe_bps_power_collapse(hw_mgr, ctx_data, 0);
+ipe_bps_resume_failed:
+ubwc_cfg_failed:
+	if (!hw_mgr->ctxt_cnt)
+		cam_icp_mgr_icp_power_collapse(hw_mgr);
+get_io_buf_failed:
+	kfree(hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info);
+	hw_mgr->ctx_data[ctx_id].icp_dev_acquire_info = NULL;
+acquire_info_failed:
+	cam_icp_mgr_put_ctx(ctx_data);
+	mutex_unlock(&ctx_data->ctx_mutex);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc = 0;
+	struct cam_icp_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if ((!hw_mgr_priv) || (!hw_caps_args)) {
+		CAM_ERR(CAM_ICP, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (copy_from_user(&icp_hw_mgr.icp_caps,
+		u64_to_user_ptr(query_cap->caps_handle),
+		sizeof(struct cam_icp_query_cap_cmd))) {
+		CAM_ERR(CAM_ICP, "copy_from_user failed");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = hfi_get_hw_caps(&icp_hw_mgr.icp_caps);
+	if (rc)
+		goto end;
+
+	icp_hw_mgr.icp_caps.dev_iommu_handle.non_secure = hw_mgr->iommu_hdl;
+	icp_hw_mgr.icp_caps.dev_iommu_handle.secure = hw_mgr->iommu_sec_hdl;
+
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
+		&icp_hw_mgr.icp_caps, sizeof(struct cam_icp_query_cap_cmd))) {
+		CAM_ERR(CAM_ICP, "copy_to_user failed");
+		rc = -EFAULT;
+	}
+end:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_icp_mgr_alloc_devs(struct device_node *of_node)
+{
+	int rc;
+	uint32_t num_dev;
+
+	rc = of_property_read_u32(of_node, "num-a5", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "getting num of a5 failed");
+		goto num_a5_failed;
+	}
+
+	icp_hw_mgr.devices[CAM_ICP_DEV_A5] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_A5]) {
+		rc = -ENOMEM;
+		goto num_a5_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-ipe", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "getting number of ipe dev nodes failed");
+		goto num_ipe_failed;
+	}
+
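+	/* Only one IPE interface slot is needed when IPE1 is not present */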
+	if (!icp_hw_mgr.ipe1_enable)
+		num_dev = 1;
+
+	icp_hw_mgr.devices[CAM_ICP_DEV_IPE] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_IPE]) {
+		rc = -ENOMEM;
+		goto num_ipe_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-bps", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "read num bps devices failed");
+		goto num_bps_failed;
+	}
+	icp_hw_mgr.devices[CAM_ICP_DEV_BPS] = kcalloc(num_dev,
+		sizeof(struct cam_hw_intf *), GFP_KERNEL);
+	if (!icp_hw_mgr.devices[CAM_ICP_DEV_BPS]) {
+		rc = -ENOMEM;
+		goto num_bps_failed;
+	}
+
+	return 0;
+num_bps_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+num_ipe_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+num_a5_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_init_devs(struct device_node *of_node)
+{
+	int rc = 0;
+	int count, i;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+
+	rc = cam_icp_mgr_alloc_devs(of_node);
+	if (rc)
+		return rc;
+
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_ICP, "no compat hw found in dev tree, cnt = %d",
+			count);
+		rc = -EINVAL;
+		goto compat_hw_name_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_ICP, "Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_ICP, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_ICP, "no child device");
+			of_node_put(child_node);
+			if (!icp_hw_mgr.ipe1_enable)
+				continue;
+			goto compat_hw_name_failed;
+		}
+		icp_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		if (!child_dev_intf->hw_ops.process_cmd) {
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		of_node_put(child_node);
+	}
+
+	icp_hw_mgr.a5_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_A5][0];
+	icp_hw_mgr.bps_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_BPS][0];
+	icp_hw_mgr.ipe0_dev_intf = icp_hw_mgr.devices[CAM_ICP_DEV_IPE][0];
+	if (icp_hw_mgr.ipe1_enable)
+		icp_hw_mgr.ipe1_dev_intf =
+			icp_hw_mgr.devices[CAM_ICP_DEV_IPE][1];
+
+	return 0;
+compat_hw_name_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+	return rc;
+}
+
+static int cam_icp_mgr_create_wq(void)
+{
+	int rc;
+	int i;
+
+	rc = cam_req_mgr_workq_create("icp_command_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.cmd_work, CRM_WORKQ_USAGE_NON_IRQ,
+		0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a command worker");
+		goto cmd_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_message_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.msg_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a message worker");
+		goto msg_work_failed;
+	}
+
+	rc = cam_req_mgr_workq_create("icp_timer_queue", ICP_WORKQ_NUM_TASK,
+		&icp_hw_mgr.timer_work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "unable to create a timer worker");
+		goto timer_work_failed;
+	}
+
+	icp_hw_mgr.cmd_work_data =
+		kcalloc(ICP_WORKQ_NUM_TASK, sizeof(struct hfi_cmd_work_data),
+		GFP_KERNEL);
+	if (!icp_hw_mgr.cmd_work_data) {
+		rc = -ENOMEM;
+		goto cmd_work_data_failed;
+	}
+
+	icp_hw_mgr.msg_work_data =
+		kcalloc(ICP_WORKQ_NUM_TASK, sizeof(struct hfi_msg_work_data),
+		GFP_KERNEL);
+	if (!icp_hw_mgr.msg_work_data) {
+		rc = -ENOMEM;
+		goto msg_work_data_failed;
+	}
+
+	icp_hw_mgr.timer_work_data =
+		kcalloc(ICP_WORKQ_NUM_TASK, sizeof(struct hfi_msg_work_data),
+		GFP_KERNEL);
+	if (!icp_hw_mgr.timer_work_data) {
+		rc = -ENOMEM;
+		goto timer_work_data_failed;
+	}
+
+	rc = cam_icp_hw_mgr_create_debugfs_entry();
+	if (rc)
+		goto debugfs_create_failed;
+
+	icp_hw_mgr.icp_pc_flag = true;
+	icp_hw_mgr.ipe_bps_pc_flag = false;
+
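+	/*
+	 * Pre-attach payload storage to every workq task so command,
+	 * message and timer work never allocate at enqueue time.
+	 */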
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.msg_work->task.pool[i].payload =
+				&icp_hw_mgr.msg_work_data[i];
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.cmd_work->task.pool[i].payload =
+				&icp_hw_mgr.cmd_work_data[i];
+
+	for (i = 0; i < ICP_WORKQ_NUM_TASK; i++)
+		icp_hw_mgr.timer_work->task.pool[i].payload =
+				&icp_hw_mgr.timer_work_data[i];
+	return 0;
+
+debugfs_create_failed:
+	kfree(icp_hw_mgr.timer_work_data);
+timer_work_data_failed:
+	kfree(icp_hw_mgr.msg_work_data);
+msg_work_data_failed:
+	kfree(icp_hw_mgr.cmd_work_data);
+cmd_work_data_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.timer_work);
+timer_work_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.msg_work);
+msg_work_failed:
+	cam_req_mgr_workq_destroy(&icp_hw_mgr.cmd_work);
+cmd_work_failed:
+	return rc;
+}
+
+static int cam_icp_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_icp_hw_mgr  *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_icp_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid cmd");
+	}
+
+	return rc;
+}
+
+int cam_icp_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
+{
+	int i, rc = 0;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_cpas_query_cap query;
+	uint32_t cam_caps;
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_ICP, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &icp_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_icp_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_icp_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_icp_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_icp_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_icp_mgr_config_hw;
+	hw_mgr_intf->hw_open = cam_icp_mgr_hw_open_u;
+	hw_mgr_intf->hw_close = cam_icp_mgr_hw_close_u;
+	hw_mgr_intf->hw_flush = cam_icp_mgr_hw_flush;
+	hw_mgr_intf->hw_cmd = cam_icp_mgr_cmd;
+
+	icp_hw_mgr.secure_mode = CAM_SECURE_MODE_NON_SECURE;
+	mutex_init(&icp_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&icp_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_init(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
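+	/* IPE0/IPE1/BPS availability is fused into the CPAS capability bits */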
+	rc = cam_cpas_get_hw_info(&query.camera_family,
+		&query.camera_version, &query.cpas_version, &cam_caps);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "failed to get cpas hw info: %d", rc);
+		goto dev_init_failed;
+	}
+
+	if (cam_caps & CPAS_IPE0_BIT)
+		icp_hw_mgr.ipe0_enable = true;
+	if (cam_caps & CPAS_IPE1_BIT)
+		icp_hw_mgr.ipe1_enable = true;
+	if (cam_caps & CPAS_BPS_BIT)
+		icp_hw_mgr.bps_enable = true;
+
+	rc = cam_icp_mgr_init_devs(of_node);
+	if (rc)
+		goto dev_init_failed;
+
+	rc = cam_smmu_get_handle("icp", &icp_hw_mgr.iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get mmu handle failed: %d", rc);
+		goto icp_get_hdl_failed;
+	}
+
+	rc = cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "icp attach failed: %d", rc);
+		goto icp_attach_failed;
+	}
+
+	rc = cam_smmu_get_handle("cam-secure", &icp_hw_mgr.iommu_sec_hdl);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "get secure mmu handle failed: %d", rc);
+		goto secure_hdl_failed;
+	}
+
+	rc = cam_icp_mgr_create_wq();
+	if (rc)
+		goto icp_wq_create_failed;
+
+	if (iommu_hdl)
+		*iommu_hdl = icp_hw_mgr.iommu_hdl;
+
+	init_completion(&icp_hw_mgr.a5_complete);
+	return rc;
+
+icp_wq_create_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_sec_hdl);
+	icp_hw_mgr.iommu_sec_hdl = -1;
+secure_hdl_failed:
+	cam_smmu_ops(icp_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
+icp_attach_failed:
+	cam_smmu_destroy_handle(icp_hw_mgr.iommu_hdl);
+	icp_hw_mgr.iommu_hdl = -1;
+icp_get_hdl_failed:
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_BPS]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_IPE]);
+	kfree(icp_hw_mgr.devices[CAM_ICP_DEV_A5]);
+dev_init_failed:
+	mutex_destroy(&icp_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_ICP_CTX_MAX; i++)
+		mutex_destroy(&icp_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
new file mode 100644
index 0000000..3f63bc4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/cam_icp_hw_mgr.h
@@ -0,0 +1,364 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_ICP_HW_MGR_H
+#define CAM_ICP_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_icp.h>
+#include "cam_icp_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_a5_hw_intf.h"
+#include "hfi_session_defs.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_soc_util.h"
+#include "cam_req_mgr_timer.h"
+
+#define CAM_ICP_ROLE_PARENT     1
+#define CAM_ICP_ROLE_CHILD      2
+
+#define CAM_FRAME_CMD_MAX       20
+
+#define CAM_MAX_OUT_RES         6
+#define CAM_MAX_IN_RES          8
+
+#define ICP_WORKQ_NUM_TASK      100
+#define ICP_WORKQ_TASK_CMD_TYPE 1
+#define ICP_WORKQ_TASK_MSG_TYPE 2
+
+#define ICP_PACKET_SIZE         0
+#define ICP_PACKET_TYPE         1
+#define ICP_PACKET_OPCODE       2
+#define ICP_MAX_OUTPUT_SUPPORTED 6
+
+#define ICP_FRAME_PROCESS_SUCCESS 0
+#define ICP_FRAME_PROCESS_FAILURE 1
+#define ICP_MSG_BUF_SIZE        256
+#define ICP_DBG_BUF_SIZE        102400
+
+#define ICP_CLK_HW_IPE          0x0
+#define ICP_CLK_HW_BPS          0x1
+#define ICP_CLK_HW_MAX          0x2
+
+#define ICP_OVER_CLK_THRESHOLD  15
+
+#define CPAS_IPE0_BIT           0x1000
+#define CPAS_IPE1_BIT           0x2000
+#define CPAS_BPS_BIT            0x400
+
+#define ICP_PWR_CLP_BPS         0x00000001
+#define ICP_PWR_CLP_IPE0        0x00010000
+#define ICP_PWR_CLP_IPE1        0x00020000
+
+#define CAM_ICP_CTX_STATE_FREE      0x0
+#define CAM_ICP_CTX_STATE_IN_USE    0x1
+#define CAM_ICP_CTX_STATE_ACQUIRED  0x2
+#define CAM_ICP_CTX_STATE_RELEASE   0x3
+
+#define CAM_ICP_CTX_MAX_CMD_BUFFERS 0x2
+
+/**
+ * struct icp_hfi_mem_info
+ * @qtbl: Memory info of queue table
+ * @cmd_q: Memory info of command queue
+ * @msg_q: Memory info of message queue
+ * @dbg_q: Memory info of debug queue
+ * @sec_heap: Memory info of secondary heap
+ * @fw_buf: Memory info of firmware
+ * @qdss_buf: Memory info of qdss
+ * @sfr_buf: Memory info for sfr buffer
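+ * @shmem: SMMU region info for the shared memory region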
+ */
+struct icp_hfi_mem_info {
+	struct cam_mem_mgr_memory_desc qtbl;
+	struct cam_mem_mgr_memory_desc cmd_q;
+	struct cam_mem_mgr_memory_desc msg_q;
+	struct cam_mem_mgr_memory_desc dbg_q;
+	struct cam_mem_mgr_memory_desc sec_heap;
+	struct cam_mem_mgr_memory_desc fw_buf;
+	struct cam_mem_mgr_memory_desc qdss_buf;
+	struct cam_mem_mgr_memory_desc sfr_buf;
+	struct cam_smmu_region_info shmem;
+};
+
+/**
+ * struct hfi_cmd_work_data
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct hfi_cmd_work_data {
+	uint32_t type;
+	void *data;
+	int32_t request_id;
+};
+
+/**
+ * struct hfi_msg_work_data
+ * @type: Task type
+ * @data: Pointer to message data
+ * @irq_status: IRQ status
+ */
+struct hfi_msg_work_data {
+	uint32_t type;
+	void *data;
+	uint32_t irq_status;
+};
+
+/**
+ * struct clk_work_data
+ * @type: Task type
+ * @data: Pointer to clock info
+ */
+struct clk_work_data {
+	uint32_t type;
+	void *data;
+};
+
+/**
+ * struct icp_frame_info
+ * @request_id: request id
+ * @io_config: the address of io config
+ * @hfi_cfg_io_cmd: command struct to be sent to hfi
+ */
+struct icp_frame_info {
+	uint64_t request_id;
+	uint64_t io_config;
+	struct hfi_cmd_ipebps_async hfi_cfg_io_cmd;
+};
+
+/**
+ * struct hfi_frame_process_info
+ * @hfi_frame_cmd: Frame process command info
+ * @bitmap: Bitmap for hfi_frame_cmd
+ * @bits: Used in hfi_frame_cmd bitmap
+ * @lock: Lock for hfi_frame_cmd
+ * @request_id: Request id list
+ * @num_out_resources: Number of out syncs
+ * @out_resource: Out sync info
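+ * @in_resource: Merged input sync object per frame command
+ * @in_free_resource: Input sync objects pending deferred delete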
+ * @fw_process_flag: Frame process flag
+ * @clk_info: Clock information for a request
+ * @frame_info: information needed to process request
+ */
+struct hfi_frame_process_info {
+	struct hfi_cmd_ipebps_async hfi_frame_cmd[CAM_FRAME_CMD_MAX];
+	void *bitmap;
+	size_t bits;
+	struct mutex lock;
+	uint64_t request_id[CAM_FRAME_CMD_MAX];
+	uint32_t num_out_resources[CAM_FRAME_CMD_MAX];
+	uint32_t out_resource[CAM_FRAME_CMD_MAX][CAM_MAX_OUT_RES];
+	uint32_t in_resource[CAM_FRAME_CMD_MAX];
+	uint32_t in_free_resource[CAM_FRAME_CMD_MAX];
+	uint32_t fw_process_flag[CAM_FRAME_CMD_MAX];
+	struct cam_icp_clk_bw_request clk_info[CAM_FRAME_CMD_MAX];
+	struct icp_frame_info frame_info[CAM_FRAME_CMD_MAX];
+};
+
+/**
+ * struct cam_ctx_clk_info
+ * @curr_fc: Context latest request frame cycles
+ * @rt_flag: Flag to indicate real time request
+ * @base_clk: Base clock to process the request
+ * @reserved: Reserved field
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @clk_rate: Supported clock rates for the context
+ */
+struct cam_ctx_clk_info {
+	uint32_t curr_fc;
+	uint32_t rt_flag;
+	uint32_t base_clk;
+	uint32_t reserved;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+	int32_t clk_rate[CAM_MAX_VOTE];
+};
+
+/**
+ * struct cam_icp_hw_ctx_data
+ * @context_priv: Context private data
+ * @ctx_mutex: Mutex for context
+ * @fw_handle: Firmware handle
+ * @scratch_mem_size: Scratch memory size
+ * @acquire_dev_cmd: Acquire command
+ * @icp_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @state: context state
+ * @role: Role of a context in case of chaining
+ * @chain_ctx: Peer context
+ * @hfi_frame_process: Frame process command
+ * @wait_complete: Completion info
+ * @temp_payload: Payload for destroy handle data
+ * @ctx_id: Context Id
+ * @clk_info: Current clock info of a context
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
+ * @icp_dev_io_info: io config resource
+ */
+struct cam_icp_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	uint32_t fw_handle;
+	uint32_t scratch_mem_size;
+	struct cam_acquire_dev_cmd acquire_dev_cmd;
+	struct cam_icp_acquire_dev_info *icp_dev_acquire_info;
+	cam_hw_event_cb_func ctxt_event_cb;
+	uint32_t state;
+	uint32_t role;
+	struct cam_icp_hw_ctx_data *chain_ctx;
+	struct hfi_frame_process_info hfi_frame_process;
+	struct completion wait_complete;
+	struct ipe_bps_destroy temp_payload;
+	uint32_t ctx_id;
+	struct cam_ctx_clk_info clk_info;
+	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
+	struct cam_icp_acquire_dev_info icp_dev_io_info;
+};
+
+/**
+ * struct icp_cmd_generic_blob
+ * @ctx: Current context info
+ * @frame_info_idx: Index used for frame process info
+ * @io_buf_addr: pointer to io buffer address
+ */
+struct icp_cmd_generic_blob {
+	struct cam_icp_hw_ctx_data *ctx;
+	uint32_t frame_info_idx;
+	uint64_t *io_buf_addr;
+};
+
+/**
+ * struct cam_icp_clk_info
+ * @base_clk: Base clock to process request
+ * @curr_clk: Current clock of hardware
+ * @threshold: Threshold for overclk count
+ * @over_clked: Over clock count
+ * @uncompressed_bw: Current bandwidth voting
+ * @compressed_bw: Current compressed bandwidth voting
+ * @hw_type: IPE/BPS device type
+ * @watch_dog: watchdog timer handle
+ * @watch_dog_reset_counter: Counter for watch dog reset
+ */
+struct cam_icp_clk_info {
+	uint32_t base_clk;
+	uint32_t curr_clk;
+	uint32_t threshold;
+	uint32_t over_clked;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+	uint32_t hw_type;
+	struct cam_req_mgr_timer *watch_dog;
+	uint32_t watch_dog_reset_counter;
+};
+
+/**
+ * struct cam_icp_hw_mgr
+ * @hw_mgr_mutex: Mutex for ICP hardware manager
+ * @hw_mgr_lock: Spinlock for ICP hardware manager
+ * @devices: Devices of ICP hardware manager
+ * @ctx_data: Context data
+ * @icp_caps: ICP capabilities
+ * @fw_download: Firmware download state
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @hfi_mem: Memory for hfi
+ * @cmd_work: Work queue for hfi commands
+ * @msg_work: Work queue for hfi messages
+ * @timer_work: Work queue for timer watchdog
+ * @msg_buf: Buffer for message data from firmware
+ * @dbg_buf: Buffer for debug data from firmware
+ * @a5_complete: Completion info
+ * @cmd_work_data: Pointer to command work queue task
+ * @msg_work_data: Pointer to message work queue task
+ * @timer_work_data: Pointer to timer work queue task
+ * @ctxt_cnt: Active context count
+ * @ipe_ctxt_cnt: IPE Active context count
+ * @bps_ctxt_cnt: BPS Active context count
+ * @dentry: Debugfs entry
+ * @a5_debug: A5 debug flag
+ * @icp_pc_flag: Flag to enable/disable power collapse
+ * @ipe_bps_pc_flag: Flag to enable/disable
+ *                   power collapse for ipe & bps
+ * @icp_debug_clk: Set clock based on debug value
+ * @icp_default_clk: Clock to set when the user doesn't supply one
+ * @clk_info: Clock info of hardware
+ * @secure_mode: Flag to enable/disable secure camera
+ * @a5_jtag_debug: entry to enable A5 JTAG debugging
+ * @a5_debug_type: entry to enable FW debug message/qdss
+ * @a5_dbg_lvl: debug level set to FW
+ * @a5_fw_dump_lvl: level set for dumping the FW data
+ * @ipe0_enable: Flag for IPE0
+ * @ipe1_enable: Flag for IPE1
+ * @bps_enable: Flag for BPS
+ * @core_info: 32-bit value encoding IPE0/IPE1 and BPS availability
+ * @a5_dev_intf : Device interface for A5
+ * @ipe0_dev_intf: Device interface for IPE0
+ * @ipe1_dev_intf: Device interface for IPE1
+ * @bps_dev_intf: Device interface for BPS
+ * @ipe_clk_state: IPE clock state flag
+ * @bps_clk_state: BPS clock state flag
+ * @recovery: Flag to validate if in previous session FW
+ *            reported a fatal error or wdt. If set FW is
+ *            re-downloaded for new camera session.
+ */
+struct cam_icp_hw_mgr {
+	struct mutex hw_mgr_mutex;
+	spinlock_t hw_mgr_lock;
+
+	struct cam_hw_intf **devices[CAM_ICP_DEV_MAX];
+	struct cam_icp_hw_ctx_data ctx_data[CAM_ICP_CTX_MAX];
+	struct cam_icp_query_cap_cmd icp_caps;
+
+	bool fw_download;
+	int32_t iommu_hdl;
+	int32_t iommu_sec_hdl;
+	struct icp_hfi_mem_info hfi_mem;
+	struct cam_req_mgr_core_workq *cmd_work;
+	struct cam_req_mgr_core_workq *msg_work;
+	struct cam_req_mgr_core_workq *timer_work;
+	uint32_t msg_buf[ICP_MSG_BUF_SIZE];
+	uint32_t dbg_buf[ICP_DBG_BUF_SIZE];
+	struct completion a5_complete;
+	struct hfi_cmd_work_data *cmd_work_data;
+	struct hfi_msg_work_data *msg_work_data;
+	struct hfi_msg_work_data *timer_work_data;
+	uint32_t ctxt_cnt;
+	uint32_t ipe_ctxt_cnt;
+	uint32_t bps_ctxt_cnt;
+	struct dentry *dentry;
+	bool a5_debug;
+	bool icp_pc_flag;
+	bool ipe_bps_pc_flag;
+	uint64_t icp_debug_clk;
+	uint64_t icp_default_clk;
+	struct cam_icp_clk_info clk_info[ICP_CLK_HW_MAX];
+	bool secure_mode;
+	bool a5_jtag_debug;
+	u64 a5_debug_type;
+	u64 a5_dbg_lvl;
+	u64 a5_fw_dump_lvl;
+	bool ipe0_enable;
+	bool ipe1_enable;
+	bool bps_enable;
+	uint32_t core_info;
+	struct cam_hw_intf *a5_dev_intf;
+	struct cam_hw_intf *ipe0_dev_intf;
+	struct cam_hw_intf *ipe1_dev_intf;
+	struct cam_hw_intf *bps_dev_intf;
+	bool ipe_clk_state;
+	bool bps_clk_state;
+	atomic_t recovery;
+};
+
+static int cam_icp_mgr_hw_close(void *hw_priv, void *hw_close_args);
+static int cam_icp_mgr_hw_open(void *hw_mgr_priv, void *download_fw_args);
+static int cam_icp_mgr_icp_resume(struct cam_icp_hw_mgr *hw_mgr);
+static int cam_icp_mgr_icp_power_collapse(struct cam_icp_hw_mgr *hw_mgr);
+#endif /* CAM_ICP_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
new file mode 100644
index 0000000..138caec
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_a5_hw_intf.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_A5_HW_INTF_H
+#define CAM_A5_HW_INTF_H
+
+#include <linux/timer.h>
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+enum cam_icp_a5_cmd_type {
+	CAM_ICP_A5_CMD_FW_DOWNLOAD,
+	CAM_ICP_A5_CMD_POWER_COLLAPSE,
+	CAM_ICP_A5_CMD_POWER_RESUME,
+	CAM_ICP_A5_CMD_SET_FW_BUF,
+	CAM_ICP_A5_CMD_ACQUIRE,
+	CAM_ICP_A5_SET_IRQ_CB,
+	CAM_ICP_A5_TEST_IRQ,
+	CAM_ICP_A5_SEND_INIT,
+	CAM_ICP_A5_CMD_VOTE_CPAS,
+	CAM_ICP_A5_CMD_CPAS_START,
+	CAM_ICP_A5_CMD_CPAS_STOP,
+	CAM_ICP_A5_CMD_UBWC_CFG,
+	CAM_ICP_A5_CMD_PC_PREP,
+	CAM_ICP_A5_CMD_MAX,
+};
+
+struct cam_icp_a5_set_fw_buf_info {
+	uint32_t iova;
+	uint64_t kva;
+	uint64_t len;
+};
+
+/**
+ * struct cam_icp_a5_query_cap - ICP query device capability payload
+ * @fw_version: firmware version info
+ * @api_version: api version info
+ * @num_ipe: number of ipes
+ * @num_bps: number of bps
+ * @num_dev: number of device capabilities in dev_caps
+ * @reserved: reserved
+ * @dev_ver: returned device capability array (CAM_QUERY_CAP IOCTL)
+ */
+struct cam_icp_a5_query_cap {
+	struct cam_icp_ver fw_version;
+	struct cam_icp_ver api_version;
+	uint32_t num_ipe;
+	uint32_t num_bps;
+	uint32_t num_dev;
+	uint32_t reserved;
+	struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX];
+};
+
+struct cam_icp_a5_acquire_dev {
+	uint32_t ctx_id;
+	struct cam_icp_acquire_dev_info icp_acquire_info;
+	struct cam_icp_res_info icp_out_acquire_info[2];
+	uint32_t fw_handle;
+};
+
+struct cam_icp_a5_set_irq_cb {
+	int32_t (*icp_hw_mgr_cb)(uint32_t irq_status, void *data);
+	void *data;
+};
+
+struct cam_icp_a5_test_irq {
+	uint32_t test_irq;
+};
+#endif /* CAM_A5_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
new file mode 100644
index 0000000..d1d6847
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_bps_hw_intf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_BPS_HW_INTF_H
+#define CAM_BPS_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+/* BPS register */
+#define BPS_TOP_RST_CMD              0x1008
+#define BPS_CDM_RST_CMD              0x10
+#define BPS_CDM_IRQ_STATUS           0x44
+#define BPS_TOP_IRQ_STATUS           0x100C
+
+/* BPS CDM/TOP status register */
+#define BPS_RST_DONE_IRQ_STATUS_BIT  0x1
+
+enum cam_icp_bps_cmd_type {
+	CAM_ICP_BPS_CMD_FW_DOWNLOAD,
+	CAM_ICP_BPS_CMD_POWER_COLLAPSE,
+	CAM_ICP_BPS_CMD_POWER_RESUME,
+	CAM_ICP_BPS_CMD_SET_FW_BUF,
+	CAM_ICP_BPS_CMD_VOTE_CPAS,
+	CAM_ICP_BPS_CMD_CPAS_START,
+	CAM_ICP_BPS_CMD_CPAS_STOP,
+	CAM_ICP_BPS_CMD_UPDATE_CLK,
+	CAM_ICP_BPS_CMD_DISABLE_CLK,
+	CAM_ICP_BPS_CMD_RESET,
+	CAM_ICP_BPS_CMD_MAX,
+};
+
+#endif /* CAM_BPS_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
new file mode 100644
index 0000000..a6eed99
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_icp_hw_intf.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_ICP_HW_INTF_H
+#define CAM_ICP_HW_INTF_H
+
+#define CAM_ICP_CMD_BUF_MAX_SIZE     128
+#define CAM_ICP_MSG_BUF_MAX_SIZE     CAM_ICP_CMD_BUF_MAX_SIZE
+
+enum cam_a5_hw_type {
+	CAM_ICP_DEV_A5,
+	CAM_ICP_DEV_IPE,
+	CAM_ICP_DEV_BPS,
+	CAM_ICP_DEV_MAX,
+};
+
+/**
+ * struct cam_a5_clk_update_cmd - Payload for hw manager command
+ *
+ * @curr_clk_rate:        clk rate to HW
+ * @ipe_bps_pc_enable:    power collapse enable flag
+ */
+struct cam_a5_clk_update_cmd {
+	uint32_t  curr_clk_rate;
+	bool  ipe_bps_pc_enable;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
new file mode 100644
index 0000000..a0a161b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include/cam_ipe_hw_intf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_IPE_HW_INTF_H
+#define CAM_IPE_HW_INTF_H
+
+#include <uapi/media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "cam_hw_mgr_intf.h"
+#include "cam_icp_hw_intf.h"
+
+/* IPE registers */
+#define IPE_TOP_RST_CMD              0x1008
+#define IPE_CDM_RST_CMD              0x10
+#define IPE_CDM_IRQ_STATUS           0x44
+#define IPE_TOP_IRQ_STATUS           0x100C
+
+/* IPE CDM/TOP status register */
+#define IPE_RST_DONE_IRQ_STATUS_BIT  0x1
+
+enum cam_icp_ipe_cmd_type {
+	CAM_ICP_IPE_CMD_FW_DOWNLOAD,
+	CAM_ICP_IPE_CMD_POWER_COLLAPSE,
+	CAM_ICP_IPE_CMD_POWER_RESUME,
+	CAM_ICP_IPE_CMD_SET_FW_BUF,
+	CAM_ICP_IPE_CMD_VOTE_CPAS,
+	CAM_ICP_IPE_CMD_CPAS_START,
+	CAM_ICP_IPE_CMD_CPAS_STOP,
+	CAM_ICP_IPE_CMD_UPDATE_CLK,
+	CAM_ICP_IPE_CMD_DISABLE_CLK,
+	CAM_ICP_IPE_CMD_RESET,
+	CAM_ICP_IPE_CMD_MAX,
+};
+
+#endif /* CAM_IPE_HW_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
new file mode 100644
index 0000000..8bd738d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/include/cam_icp_hw_mgr_intf.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_ICP_HW_MGR_INTF_H
+#define CAM_ICP_HW_MGR_INTF_H
+
+#include <uapi/media/cam_icp.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+#include "cam_cpas_api.h"
+
+#define ICP_CLK_TURBO_HZ         600000000
+#define ICP_CLK_SVS_HZ           400000000
+
+#define CAM_ICP_A5_BW_BYTES_VOTE 40000000
+
+#define CAM_ICP_CTX_MAX          36
+
+#define CPAS_IPE1_BIT            0x2000
+
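+/*
+ * Initialize the ICP hw manager from the given DT node; on success the
+ * manager handle and the IOMMU handle are returned through @hw_mgr_hdl
+ * and @iommu_hdl (a minimal summary inferred from the signature).
+ */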
+int cam_icp_hw_mgr_init(struct device_node *of_node,
+	uint64_t *hw_mgr_hdl, int *iommu_hdl);
+
+/**
+ * struct cam_icp_cpas_vote
+ * @ahb_vote: AHB vote info
+ * @axi_vote: AXI vote info
+ * @ahb_vote_valid: Flag indicating whether the ahb vote data is valid
+ * @axi_vote_valid: Flag indicating whether the axi vote data is valid
+ */
+struct cam_icp_cpas_vote {
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	uint32_t ahb_vote_valid;
+	uint32_t axi_vote_valid;
+};
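+
+/*
+ * Illustrative only: callers are expected to set the valid flag for each
+ * vote they populate, e.g.
+ *
+ *	struct cam_icp_cpas_vote vote = {0};
+ *
+ *	vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+ *	vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+ *	vote.ahb_vote_valid = 1;
+ */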
+
+#endif /* CAM_ICP_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
new file mode 100644
index 0000000..e74c058
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/icp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_icp/fw_inc
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ipe_dev.o ipe_core.o ipe_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
new file mode 100644
index 0000000..2cea924
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/iopoll.h>
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "ipe_core.h"
+#include "ipe_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_ipe_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "hfi_reg.h"
+
+#define HFI_MAX_POLL_TRY 5
+
+static int cam_ipe_caps_vote(struct cam_ipe_device_core_info *core_info,
+	struct cam_icp_cpas_vote *cpas_vote)
+{
+	int rc = 0;
+
+	if (cpas_vote->ahb_vote_valid) {
+		rc = cam_cpas_update_ahb_vote(core_info->cpas_handle,
+			&cpas_vote->ahb_vote);
+		if (rc)
+			CAM_ERR(CAM_ICP, "ahb vote update failed: %d", rc);
+	}
+
+	if (cpas_vote->axi_vote_valid) {
+		rc = cam_cpas_update_axi_vote(core_info->cpas_handle,
+			&cpas_vote->axi_vote);
+		if (rc)
+			CAM_ERR(CAM_ICP, "axi vote update failed: %d", rc);
+	}
+
+	return rc;
+}
+
+int cam_ipe_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ipe_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	struct cam_icp_cpas_vote cpas_vote;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	cpas_vote.ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	cpas_vote.ahb_vote.vote.level = CAM_SVS_VOTE;
+	cpas_vote.axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	cpas_vote.axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&cpas_vote.ahb_vote, &cpas_vote.axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "cpass start failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_start = true;
+
+	rc = cam_ipe_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "soc enable is failed : %d", rc);
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	} else {
+		core_info->clk_enable = true;
+	}
+
+	return rc;
+}
+
+int cam_ipe_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ipe_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+	if ((!soc_info) || (!core_info)) {
+		CAM_ERR(CAM_ICP, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	rc = cam_ipe_disable_soc_resources(soc_info, core_info->clk_enable);
+	if (rc)
+		CAM_ERR(CAM_ICP, "soc disable is failed : %d", rc);
+	core_info->clk_enable = false;
+
+	if (core_info->cpas_start) {
+		if (cam_cpas_stop(core_info->cpas_handle))
+			CAM_ERR(CAM_ICP, "cpas stop is failed");
+		else
+			core_info->cpas_start = false;
+	}
+
+	return rc;
+}
+
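+/*
+ * Force IPE power collapse: if the collapse bit is not already set in the
+ * CPAS-TOP power control register, set it (failing if the core still
+ * reports powered on), then switch the GDSC regulators back to NORMAL
+ * mode. The final register reads are for debug logging only.
+ */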
+static int cam_ipe_handle_pc(struct cam_hw_info *ipe_dev)
+{
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	struct cam_ipe_device_hw_info *hw_info = NULL;
+	int pwr_ctrl;
+	int pwr_status;
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+	hw_info = core_info->ipe_hw_info;
+
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	if (!(pwr_ctrl & IPE_COLLAPSE_MASK)) {
+		cam_cpas_reg_read(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+			true, &pwr_status);
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0x1);
+
+		if (pwr_status >> IPE_PWR_ON_MASK)
+			return -EINVAL;
+
+	}
+	cam_ipe_get_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+		true, &pwr_status);
+	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	return 0;
+}
+
+static int cam_ipe_handle_resume(struct cam_hw_info *ipe_dev)
+{
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	struct cam_ipe_device_hw_info *hw_info = NULL;
+	int pwr_ctrl;
+	int pwr_status;
+	int rc = 0;
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+	hw_info = core_info->ipe_hw_info;
+
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	if (pwr_ctrl & IPE_COLLAPSE_MASK) {
+		CAM_DBG(CAM_ICP, "IPE pwr_ctrl set(%x)", pwr_ctrl);
+		cam_cpas_reg_write(core_info->cpas_handle,
+			CAM_CPAS_REG_CPASTOP,
+			hw_info->pwr_ctrl, true, 0);
+	}
+
+	rc = cam_ipe_transfer_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_ctrl, true, &pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, hw_info->pwr_status,
+		true, &pwr_status);
+	CAM_DBG(CAM_ICP, "pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	return rc;
+}
+
+static int cam_ipe_cmd_reset(struct cam_hw_soc_info *soc_info,
+	struct cam_ipe_device_core_info *core_info)
+{
+	int pwr_ctrl, pwr_status, rc = 0;
+	uint32_t status = 0, retry_cnt = 0;
+	bool reset_ipe_cdm_fail = false;
+	bool reset_ipe_top_fail = false;
+
+	CAM_DBG(CAM_ICP, "CAM_ICP_IPE_CMD_RESET");
+	/* IPE CDM core reset */
+	cam_io_w_mb((uint32_t)0xF,
+		soc_info->reg_map[0].mem_base + IPE_CDM_RST_CMD);
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((soc_info->reg_map[0].mem_base +
+			IPE_CDM_IRQ_STATUS),
+			status, ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+			100, 10000);
+
+		CAM_DBG(CAM_HFI, "ipe_cdm_irq_status = %u", status);
+
+		if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+			break;
+		retry_cnt++;
+	}
+	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		IPE_CDM_IRQ_STATUS);
+	if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+		CAM_ERR(CAM_ICP, "IPE CDM rst failed status 0x%x", status);
+		reset_ipe_cdm_fail = true;
+	}
+
+	/* IPE top reset */
+	status = 0;
+	retry_cnt = 0;
+	cam_io_w_mb((uint32_t)0x3,
+		soc_info->reg_map[0].mem_base + IPE_TOP_RST_CMD);
+	while (retry_cnt < HFI_MAX_POLL_TRY) {
+		readw_poll_timeout((soc_info->reg_map[0].mem_base +
+			IPE_TOP_IRQ_STATUS),
+			status, ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1),
+			100, 10000);
+
+		CAM_DBG(CAM_HFI, "ipe_top_irq_status = %u", status);
+
+		if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) == 0x1)
+			break;
+		retry_cnt++;
+	}
+	status = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		IPE_TOP_IRQ_STATUS);
+	if ((status & IPE_RST_DONE_IRQ_STATUS_BIT) != 0x1) {
+		CAM_ERR(CAM_ICP, "IPE top rst failed status 0x%x", status);
+		reset_ipe_top_fail = true;
+	}
+
+	cam_ipe_get_gdsc_control(soc_info);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, core_info->ipe_hw_info->pwr_ctrl,
+		true, &pwr_ctrl);
+	cam_cpas_reg_read(core_info->cpas_handle,
+		CAM_CPAS_REG_CPASTOP, core_info->ipe_hw_info->pwr_status,
+		true, &pwr_status);
+	CAM_DBG(CAM_ICP, "(After)pwr_ctrl = %x pwr_status = %x",
+		pwr_ctrl, pwr_status);
+
+	if (reset_ipe_cdm_fail || reset_ipe_top_fail)
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *ipe_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_ipe_device_core_info *core_info = NULL;
+	struct cam_ipe_device_hw_info *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_ICP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_ICP_IPE_CMD_MAX) {
+		CAM_ERR(CAM_ICP, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	soc_info = &ipe_dev->soc_info;
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+	hw_info = core_info->ipe_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ICP_IPE_CMD_VOTE_CPAS: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args)
+			return -EINVAL;
+
+		cam_ipe_caps_vote(core_info, cpas_vote);
+		break;
+	}
+
+	case CAM_ICP_IPE_CMD_CPAS_START: {
+		struct cam_icp_cpas_vote *cpas_vote = cmd_args;
+
+		if (!cmd_args)
+			return -EINVAL;
+
+		if (!core_info->cpas_start) {
+			rc = cam_cpas_start(core_info->cpas_handle,
+				&cpas_vote->ahb_vote, &cpas_vote->axi_vote);
+			core_info->cpas_start = true;
+		}
+		break;
+	}
+
+	case CAM_ICP_IPE_CMD_CPAS_STOP:
+		if (core_info->cpas_start) {
+			cam_cpas_stop(core_info->cpas_handle);
+			core_info->cpas_start = false;
+		}
+		break;
+	case CAM_ICP_IPE_CMD_POWER_COLLAPSE:
+		rc = cam_ipe_handle_pc(ipe_dev);
+		break;
+	case CAM_ICP_IPE_CMD_POWER_RESUME:
+		rc = cam_ipe_handle_resume(ipe_dev);
+		break;
+	case CAM_ICP_IPE_CMD_UPDATE_CLK: {
+		struct cam_a5_clk_update_cmd *clk_upd_cmd =
+			(struct cam_a5_clk_update_cmd *)cmd_args;
+		uint32_t clk_rate;
+
+		if (!cmd_args)
+			return -EINVAL;
+
+		clk_rate = clk_upd_cmd->curr_clk_rate;
+
+		CAM_DBG(CAM_ICP, "ipe_src_clk rate = %d", (int)clk_rate);
+		if (!core_info->clk_enable) {
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				cam_ipe_handle_pc(ipe_dev);
+				cam_cpas_reg_write(core_info->cpas_handle,
+					CAM_CPAS_REG_CPASTOP,
+					hw_info->pwr_ctrl, true, 0x0);
+			}
+			rc = cam_ipe_toggle_clk(soc_info, true);
+			if (rc)
+				CAM_ERR(CAM_ICP, "Enable failed");
+			else
+				core_info->clk_enable = true;
+			if (clk_upd_cmd->ipe_bps_pc_enable) {
+				rc = cam_ipe_handle_resume(ipe_dev);
+				if (rc)
+					CAM_ERR(CAM_ICP, "bps resume failed");
+			}
+		}
+		CAM_DBG(CAM_ICP, "clock rate %d", clk_rate);
+
+		rc = cam_ipe_update_clk_rate(soc_info, clk_rate);
+		if (rc)
+			CAM_ERR(CAM_ICP, "Failed to update clk");
+		break;
+	}
+	case CAM_ICP_IPE_CMD_DISABLE_CLK:
+		if (core_info->clk_enable)
+			cam_ipe_toggle_clk(soc_info, false);
+		core_info->clk_enable = false;
+		break;
+	case CAM_ICP_IPE_CMD_RESET:
+		rc = cam_ipe_cmd_reset(soc_info, core_info);
+		break;
+	default:
+		CAM_ERR(CAM_ICP, "Invalid Cmd Type:%u", cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+irqreturn_t cam_ipe_irq(int irq_num, void *data)
+{
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
new file mode 100644
index 0000000..0aa316d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_core.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_IPE_CORE_H
+#define CAM_IPE_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+#define IPE_COLLAPSE_MASK 0x1
+#define IPE_PWR_ON_MASK   0x2
+
+struct cam_ipe_device_hw_info {
+	uint32_t hw_idx;
+	uint32_t pwr_ctrl;
+	uint32_t pwr_status;
+	uint32_t reserved;
+};
+
+struct cam_ipe_device_core_info {
+	struct cam_ipe_device_hw_info *ipe_hw_info;
+	uint32_t cpas_handle;
+	bool cpas_start;
+	bool clk_enable;
+};
+
+int cam_ipe_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_ipe_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_ipe_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_ipe_irq(int irq_num, void *data);
+
+#endif /* CAM_IPE_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
new file mode 100644
index 0000000..70130da
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_dev.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+#include "ipe_core.h"
+#include "ipe_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_icp_hw_intf.h"
+#include "cam_icp_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_ipe_device_hw_info cam_ipe_hw_info[] = {
+	{
+		.hw_idx = 0,
+		.pwr_ctrl = 0x4c,
+		.pwr_status = 0x48,
+		.reserved = 0,
+	},
+	{
+		.hw_idx = 1,
+		.pwr_ctrl = 0x54,
+		.pwr_status = 0x50,
+		.reserved = 0,
+	},
+};
+
+static char ipe_dev_name[8];
+
+int cam_ipe_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_ipe_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = &soc_info->pdev->dev;
+	memcpy(cpas_register_params.identifier, "ipe", sizeof("ipe"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+int cam_ipe_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info            *ipe_dev = NULL;
+	struct cam_hw_intf            *ipe_dev_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_ipe_device_core_info   *core_info = NULL;
+	struct cam_ipe_device_hw_info     *hw_info = NULL;
+	int                                rc = 0;
+	struct cam_cpas_query_cap query;
+	uint32_t cam_caps;
+	uint32_t hw_idx = 0;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &hw_idx);
+
+	cam_cpas_get_hw_info(&query.camera_family,
+		&query.camera_version, &query.cpas_version, &cam_caps);
+	if ((!(cam_caps & CPAS_IPE1_BIT)) && (hw_idx)) {
+		CAM_ERR(CAM_ICP, "IPE1 hw idx = %d\n", hw_idx);
+		return -EINVAL;
+	}
+
+	ipe_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!ipe_dev_intf)
+		return -ENOMEM;
+
+	ipe_dev_intf->hw_idx = hw_idx;
+	ipe_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!ipe_dev) {
+		kfree(ipe_dev_intf);
+		return -ENOMEM;
+	}
+
+	memset(ipe_dev_name, 0, sizeof(ipe_dev_name));
+	snprintf(ipe_dev_name, sizeof(ipe_dev_name),
+		"ipe%1u", ipe_dev_intf->hw_idx);
+
+	ipe_dev->soc_info.pdev = pdev;
+	ipe_dev->soc_info.dev = &pdev->dev;
+	ipe_dev->soc_info.dev_name = ipe_dev_name;
+	ipe_dev_intf->hw_priv = ipe_dev;
+	ipe_dev_intf->hw_ops.init = cam_ipe_init_hw;
+	ipe_dev_intf->hw_ops.deinit = cam_ipe_deinit_hw;
+	ipe_dev_intf->hw_ops.process_cmd = cam_ipe_process_cmd;
+	ipe_dev_intf->hw_type = CAM_ICP_DEV_IPE;
+
+	CAM_DBG(CAM_ICP, "type %d index %d",
+		ipe_dev_intf->hw_type,
+		ipe_dev_intf->hw_idx);
+
+	platform_set_drvdata(pdev, ipe_dev_intf);
+
+	ipe_dev->core_info = kzalloc(sizeof(struct cam_ipe_device_core_info),
+		GFP_KERNEL);
+	if (!ipe_dev->core_info) {
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		return -ENOMEM;
+	}
+	core_info = (struct cam_ipe_device_core_info *)ipe_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_DBG(CAM_ICP, "No ipe hardware info");
+		kfree(ipe_dev->core_info);
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		rc = -EINVAL;
+		return rc;
+	}
+	hw_info = &cam_ipe_hw_info[ipe_dev_intf->hw_idx];
+	core_info->ipe_hw_info = hw_info;
+
+	rc = cam_ipe_init_soc_resources(&ipe_dev->soc_info, cam_ipe_irq,
+		ipe_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_ICP, "failed to init_soc");
+		kfree(ipe_dev->core_info);
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		return rc;
+	}
+
+	CAM_DBG(CAM_ICP, "cam_ipe_init_soc_resources : %pK",
+		(void *)&ipe_dev->soc_info);
+	rc = cam_ipe_register_cpas(&ipe_dev->soc_info,
+		core_info, ipe_dev_intf->hw_idx);
+	if (rc < 0) {
+		kfree(ipe_dev->core_info);
+		kfree(ipe_dev);
+		kfree(ipe_dev_intf);
+		return rc;
+	}
+	ipe_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&ipe_dev->hw_mutex);
+	spin_lock_init(&ipe_dev->hw_lock);
+	init_completion(&ipe_dev->hw_complete);
+
+	CAM_DBG(CAM_ICP, "IPE%d probe successful",
+		ipe_dev_intf->hw_idx);
+
+	return rc;
+}
+
+static const struct of_device_id cam_ipe_dt_match[] = {
+	{
+		.compatible = "qcom,cam-ipe",
+		.data = &cam_ipe_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_ipe_dt_match);
+
+static struct platform_driver cam_ipe_driver = {
+	.probe = cam_ipe_probe,
+	.driver = {
+		.name = "cam-ipe",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ipe_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_ipe_init_module(void)
+{
+	return platform_driver_register(&cam_ipe_driver);
+}
+
+static void __exit cam_ipe_exit_module(void)
+{
+	platform_driver_unregister(&cam_ipe_driver);
+}
+
+module_init(cam_ipe_init_module);
+module_exit(cam_ipe_exit_module);
+MODULE_DESCRIPTION("CAM IPE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
new file mode 100644
index 0000000..8f9a6f6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_icp.h>
+#include "ipe_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
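+/*
+ * GDSC control handoff between the kernel and the IPE core: FAST mode is
+ * used when handing control away (cam_ipe_transfer_gdsc_control) and
+ * NORMAL mode when taking it back (cam_ipe_get_gdsc_control). On failure,
+ * any modes already changed are rolled back.
+ */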
+int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_set_mode(soc_info->rgltr[i],
+			REGULATOR_MODE_FAST);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+				soc_info->rgltr_name[i]);
+			goto rgltr_set_mode_failed;
+		}
+	}
+	return 0;
+
+rgltr_set_mode_failed:
+	for (i = i - 1; i >= 0; i--)
+		if (soc_info->rgltr[i])
+			regulator_set_mode(soc_info->rgltr[i],
+				REGULATOR_MODE_NORMAL);
+
+	return rc;
+}
+
+int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = regulator_set_mode(soc_info->rgltr[i],
+					REGULATOR_MODE_NORMAL);
+		if (rc) {
+			CAM_ERR(CAM_ICP, "Regulator set mode %s failed",
+				soc_info->rgltr_name[i]);
+			goto rgltr_set_mode_failed;
+		}
+	}
+	return 0;
+
+rgltr_set_mode_failed:
+	for (i = i - 1; i >= 0; i--)
+		if (soc_info->rgltr[i])
+			regulator_set_mode(soc_info->rgltr[i],
+					REGULATOR_MODE_FAST);
+
+	return rc;
+}
+
+static int cam_ipe_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ICP, "get ipe dt prop is failed");
+
+	return rc;
+}
+
+static int cam_ipe_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t ipe_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, ipe_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t ipe_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_ipe_get_dt_properties(soc_info);
+	if (rc < 0)
+		return rc;
+
+	rc = cam_ipe_request_platform_resource(soc_info, ipe_irq_handler,
+		irq_data);
+	if (rc < 0)
+		return rc;
+
+	return rc;
+}
+
+int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, false);
+	if (rc) {
+		CAM_ERR(CAM_ICP, "enable platform failed");
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, disable_clk,
+		false);
+	if (rc)
+		CAM_ERR(CAM_ICP, "enable platform failed");
+
+	return rc;
+}
+
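+/*
+ * Clamp the requested rate to the TURBO level of the source clock before
+ * applying it; e.g. a request above a 600000000 Hz TURBO entry (the
+ * ICP_CLK_TURBO_HZ default in this series) is reduced to that ceiling.
+ */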
+int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate)
+{
+	int32_t src_clk_idx;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if ((soc_info->clk_level_valid[CAM_TURBO_VOTE] == true) &&
+		(soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx] != 0) &&
+		(clk_rate > soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx])) {
+		CAM_DBG(CAM_ICP, "clk_rate %d greater than max, reset to %d",
+			clk_rate,
+			soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx]);
+		clk_rate = soc_info->clk_rate[CAM_TURBO_VOTE][src_clk_idx];
+	}
+
+	return cam_soc_util_set_src_clk_rate(soc_info, clk_rate);
+}
+
+int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable)
+{
+	int rc = 0;
+
+	if (clk_enable)
+		rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+	else
+		cam_soc_util_clk_disable_default(soc_info);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
new file mode 100644
index 0000000..7303cc9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_icp/icp_hw/ipe_hw/ipe_soc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_IPE_SOC_H
+#define CAM_IPE_SOC_H
+
+#include "cam_soc_util.h"
+
+int cam_ipe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t ipe_irq_handler, void *irq_data);
+
+int cam_ipe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_ipe_disable_soc_resources(struct cam_hw_soc_info *soc_info,
+	bool disable_clk);
+
+int cam_ipe_get_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_ipe_transfer_gdsc_control(struct cam_hw_soc_info *soc_info);
+
+int cam_ipe_update_clk_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_rate);
+int cam_ipe_toggle_clk(struct cam_hw_soc_info *soc_info, bool clk_enable);
+#endif /* CAM_IPE_SOC_H */
diff --git a/drivers/media/platform/msm/camera/cam_isp/Makefile b/drivers/media/platform/msm/camera/cam_isp/Makefile
new file mode 100644
index 0000000..4de4a7f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += isp_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_dev.o cam_isp_context.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
new file mode 100644
index 0000000..97e55f7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -0,0 +1,3284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/ratelimit.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+#include "cam_context_utils.h"
+#include "cam_cdm_util.h"
+#include "cam_isp_context.h"
+#include "cam_common_util.h"
+
+static const char isp_dev_name[] = "isp";
+
+#define INC_STATE_MONITOR_HEAD(head) \
+	(atomic64_add_return(1, head) % \
+	CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES)
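+
+/*
+ * INC_STATE_MONITOR_HEAD atomically advances the state-monitor write index
+ * and wraps it at CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES, so with N entries
+ * a head value of N - 1 is followed by 0 and the oldest record is
+ * overwritten.
+ */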
+
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info);
+
+static void __cam_isp_ctx_update_state_monitor_array(
+	struct cam_isp_context *ctx_isp,
+	enum cam_isp_state_change_trigger trigger_type,
+	uint32_t req_id)
+{
+	int iterator = 0;
+
+	iterator = INC_STATE_MONITOR_HEAD(&ctx_isp->state_monitor_head);
+	ctx_isp->cam_isp_ctx_state_monitor[iterator].curr_state =
+		ctx_isp->substate_activated;
+	ctx_isp->cam_isp_ctx_state_monitor[iterator].trigger =
+		trigger_type;
+	ctx_isp->cam_isp_ctx_state_monitor[iterator].req_id =
+		req_id;
+	ctx_isp->cam_isp_ctx_state_monitor[iterator].evt_time_stamp =
+		jiffies_to_msecs(jiffies);
+}
+
+static const char *__cam_isp_ctx_substate_val_to_type(
+	uint32_t type)
+{
+	switch (type) {
+	case CAM_ISP_CTX_ACTIVATED_SOF:
+		return "SOF";
+	case CAM_ISP_CTX_ACTIVATED_APPLIED:
+		return "APPLIED";
+	case CAM_ISP_CTX_ACTIVATED_EPOCH:
+		return "EPOCH";
+	case CAM_ISP_CTX_ACTIVATED_BUBBLE:
+		return "BUBBLE";
+	case CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED:
+		return "BUBBLE_APPLIED";
+	case CAM_ISP_CTX_ACTIVATED_HALT:
+		return "HALT";
+	default:
+		return "CAM_ISP_CTX_INVALID_STATE";
+	}
+}
+
+static const char *__cam_isp_hw_evt_val_to_type(
+	uint32_t evt_id)
+{
+	switch (evt_id) {
+	case CAM_ISP_STATE_CHANGE_TRIGGER_ERROR:
+		return "ERROR";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_SOF:
+		return "SOF";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE:
+		return "REG_UPDATE";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH:
+		return "EPOCH";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_EOF:
+		return "EOF";
+	case CAM_ISP_STATE_CHANGE_TRIGGER_DONE:
+		return "DONE";
+	default:
+		return "CAM_ISP_EVENT_INVALID";
+	}
+}
+
+static void __cam_isp_ctx_dump_state_monitor_array(
+	struct cam_isp_context *ctx_isp)
+{
+	int i = 0;
+	uint64_t state_head = 0;
+	uint64_t index;
+
+	state_head = atomic64_read(&ctx_isp->state_monitor_head);
+	CAM_ERR_RATE_LIMIT(CAM_ISP,
+		"Dumping state information for preceding requests");
+
+	for (i = CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES - 1; i >= 0;
+		i--) {
+		index = (((state_head - i) +
+			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES) %
+			CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES);
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+		"time[0x%llx] req_id[%u] state[%s] evt_type[%s]",
+		ctx_isp->cam_isp_ctx_state_monitor[index].evt_time_stamp,
+		ctx_isp->cam_isp_ctx_state_monitor[index].req_id,
+		__cam_isp_ctx_substate_val_to_type(
+		ctx_isp->cam_isp_ctx_state_monitor[index].curr_state),
+		__cam_isp_hw_evt_val_to_type(
+		ctx_isp->cam_isp_ctx_state_monitor[index].trigger));
+	}
+}
+
+static void cam_isp_ctx_dump_req(struct cam_isp_ctx_req *req_isp)
+{
+	int i = 0, rc = 0;
+	size_t len = 0;
+	uint32_t *buf_addr;
+	uint32_t *buf_start, *buf_end;
+
+	for (i = 0; i < req_isp->num_cfg; i++) {
+		rc = cam_packet_util_get_cmd_mem_addr(
+			req_isp->cfg[i].handle, &buf_addr, &len);
+		if (rc) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"Failed to get_cmd_mem_addr, rc=%d",
+				rc);
+		} else {
+			buf_start = (uint32_t *)((uint8_t *) buf_addr +
+				req_isp->cfg[i].offset);
+			buf_end = (uint32_t *)((uint8_t *) buf_start +
+				req_isp->cfg[i].len - 1);
+			if (len < (buf_end - buf_start + 1)) {
+				CAM_ERR(CAM_ISP,
+					"Invalid len %lld buf_start-end=%d",
+					len, (buf_end - buf_start + 1));
+				continue;
+			}
+			cam_cdm_util_dump_cmd_buf(buf_start, buf_end);
+		}
+	}
+}
+
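+/*
+ * Insert a request into the pending list in ascending request_id order:
+ * entries with a larger id are moved aside to temp_list, the new request
+ * is appended, and the displaced entries are re-appended after it. For
+ * example, pending {4, 6} with a new request 5 becomes {4, 5, 6}.
+ */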
+static int __cam_isp_ctx_enqueue_request_in_order(
+	struct cam_context *ctx, struct cam_ctx_request *req)
+{
+	struct cam_ctx_request           *req_current;
+	struct cam_ctx_request           *req_prev;
+	struct list_head                  temp_list;
+
+	INIT_LIST_HEAD(&temp_list);
+	spin_lock_bh(&ctx->lock);
+	if (list_empty(&ctx->pending_req_list)) {
+		list_add_tail(&req->list, &ctx->pending_req_list);
+	} else {
+		list_for_each_entry_safe_reverse(
+			req_current, req_prev, &ctx->pending_req_list, list) {
+			if (req->request_id < req_current->request_id) {
+				list_del_init(&req_current->list);
+				list_add(&req_current->list, &temp_list);
+				continue;
+			} else if (req->request_id == req_current->request_id) {
+				CAM_WARN(CAM_ISP,
+					"Received duplicated request %lld",
+					req->request_id);
+			}
+			break;
+		}
+		list_add_tail(&req->list, &ctx->pending_req_list);
+
+		if (!list_empty(&temp_list)) {
+			list_for_each_entry_safe(
+				req_current, req_prev, &temp_list, list) {
+				list_del_init(&req_current->list);
+				list_add_tail(&req_current->list,
+					&ctx->pending_req_list);
+			}
+		}
+	}
+	spin_unlock_bh(&ctx->lock);
+	return 0;
+}
+
+static int __cam_isp_ctx_enqueue_init_request(
+	struct cam_context *ctx, struct cam_ctx_request *req)
+{
+	int rc = 0;
+	struct cam_ctx_request           *req_old;
+	struct cam_isp_ctx_req           *req_isp_old;
+	struct cam_isp_ctx_req           *req_isp_new;
+
+	spin_lock_bh(&ctx->lock);
+	if (list_empty(&ctx->pending_req_list)) {
+		list_add_tail(&req->list, &ctx->pending_req_list);
+		CAM_DBG(CAM_ISP, "INIT packet added req id= %d",
+			req->request_id);
+		goto end;
+	}
+
+	req_old = list_first_entry(&ctx->pending_req_list,
+		struct cam_ctx_request, list);
+	req_isp_old = (struct cam_isp_ctx_req *) req_old->req_priv;
+	req_isp_new = (struct cam_isp_ctx_req *) req->req_priv;
+	if (req_isp_old->hw_update_data.packet_opcode_type ==
+		CAM_ISP_PACKET_INIT_DEV) {
+		if ((req_isp_old->num_cfg + req_isp_new->num_cfg) >=
+			CAM_ISP_CTX_CFG_MAX) {
+			CAM_WARN(CAM_ISP, "Can not merge INIT pkt");
+			rc = -ENOMEM;
+		}
+
+		if (req_isp_old->num_fence_map_out != 0 ||
+			req_isp_old->num_fence_map_in != 0) {
+			CAM_WARN(CAM_ISP, "Invalid INIT pkt sequence");
+			rc = -EINVAL;
+		}
+
+		if (!rc) {
+			memcpy(req_isp_old->fence_map_out,
+				req_isp_new->fence_map_out,
+				sizeof(req_isp_new->fence_map_out[0])*
+				req_isp_new->num_fence_map_out);
+			req_isp_old->num_fence_map_out =
+				req_isp_new->num_fence_map_out;
+
+			memcpy(req_isp_old->fence_map_in,
+				req_isp_new->fence_map_in,
+				sizeof(req_isp_new->fence_map_in[0])*
+				req_isp_new->num_fence_map_in);
+			req_isp_old->num_fence_map_in =
+				req_isp_new->num_fence_map_in;
+
+			memcpy(&req_isp_old->cfg[req_isp_old->num_cfg],
+				req_isp_new->cfg,
+				sizeof(req_isp_new->cfg[0])*
+				req_isp_new->num_cfg);
+			req_isp_old->num_cfg += req_isp_new->num_cfg;
+
+			req_old->request_id = req->request_id;
+
+			list_add_tail(&req->list, &ctx->free_req_list);
+		}
+	} else {
+		CAM_WARN(CAM_ISP,
+			"Received Update pkt before INIT pkt. req_id= %lld",
+			req->request_id);
+		rc = -EINVAL;
+	}
+end:
+	spin_unlock_bh(&ctx->lock);
+	return rc;
+}
+
+static const char *__cam_isp_resource_handle_id_to_type(
+	uint32_t resource_handle)
+{
+	switch (resource_handle) {
+	case CAM_ISP_IFE_OUT_RES_FULL:
+		return "FULL";
+	case CAM_ISP_IFE_OUT_RES_DS4:
+		return "DS4";
+	case CAM_ISP_IFE_OUT_RES_DS16:
+		return "DS16";
+	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
+		return "RAW_DUMP";
+	case CAM_ISP_IFE_OUT_RES_FD:
+		return "FD";
+	case CAM_ISP_IFE_OUT_RES_PDAF:
+		return "PDAF";
+	case CAM_ISP_IFE_OUT_RES_2PD:
+		return "2PD";
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+		return "RDI_0";
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+		return "RDI_1";
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+		return "RDI_2";
+	case CAM_ISP_IFE_OUT_RES_RDI_3:
+		return "RDI_3";
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
+		return "STATS_HDR_BE";
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
+		return "STATS_HDR_BHIST";
+	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
+		return "STATS_TL_BG";
+	case CAM_ISP_IFE_OUT_RES_STATS_BF:
+		return "STATS_BF";
+	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
+		return "STATS_AWB_BG";
+	case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
+		return "STATS_BHIST";
+	case CAM_ISP_IFE_OUT_RES_STATS_RS:
+		return "STATS_RS";
+	case CAM_ISP_IFE_OUT_RES_STATS_CS:
+		return "STATS_CS";
+	default:
+		return "CAM_ISP_Invalid_Resource_Type";
+	}
+}
+
+static uint64_t __cam_isp_ctx_get_event_ts(uint32_t evt_id, void *evt_data)
+{
+	uint64_t ts = 0;
+
+	if (!evt_data)
+		return 0;
+
+	switch (evt_id) {
+	case CAM_ISP_HW_EVENT_ERROR:
+		ts = ((struct cam_isp_hw_error_event_data *)evt_data)->
+			timestamp;
+		break;
+	case CAM_ISP_HW_EVENT_SOF:
+		ts = ((struct cam_isp_hw_sof_event_data *)evt_data)->
+			timestamp;
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		ts = ((struct cam_isp_hw_reg_update_event_data *)evt_data)->
+			timestamp;
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		ts = ((struct cam_isp_hw_epoch_event_data *)evt_data)->
+			timestamp;
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		ts = ((struct cam_isp_hw_eof_event_data *)evt_data)->
+			timestamp;
+		break;
+	case CAM_ISP_HW_EVENT_DONE:
+		break;
+	default:
+		CAM_DBG(CAM_ISP, "Invalid Event Type %d", evt_id);
+	}
+
+	return ts;
+}
+
+static void __cam_isp_ctx_handle_buf_done_fail_log(
+	struct cam_isp_ctx_req *req_isp)
+{
+	int i;
+
+	if (req_isp->num_fence_map_out >= CAM_ISP_CTX_RES_MAX) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Num Resources exceed mMAX %d >= %d ",
+			req_isp->num_fence_map_out, CAM_ISP_CTX_RES_MAX);
+		return;
+	}
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP,
+		"Resource Handles that fail to generate buf_done in prev frame");
+	for (i = 0; i < req_isp->num_fence_map_out; i++) {
+		if (req_isp->fence_map_out[i].sync_id != -1)
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Resource_Handle: [%s] Sync_ID: [0x%x]",
+			__cam_isp_resource_handle_id_to_type(
+			req_isp->fence_map_out[i].resource_handle),
+			req_isp->fence_map_out[i].sync_id);
+	}
+}
+
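+/*
+ * Match each done resource handle against the oldest active request's
+ * fence map and signal the corresponding sync object: SUCCESS in the
+ * normal case, ERROR when a bubble was detected without recovery, or
+ * count-only (no signal) when bubble recovery will re-apply the request.
+ * Once all fences are acked, the request moves to the free list, or back
+ * to the pending list for bubble recovery.
+ */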
+static int __cam_isp_ctx_handle_buf_done_in_activated_state(
+	struct cam_isp_context *ctx_isp,
+	struct cam_isp_hw_done_event_data *done,
+	uint32_t bubble_state)
+{
+	int rc = 0;
+	int i, j;
+	struct cam_ctx_request  *req;
+	struct cam_isp_ctx_req  *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (list_empty(&ctx->active_req_list)) {
+		CAM_DBG(CAM_ISP, "Buf done with no active request!");
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter with bubble_state %d", bubble_state);
+
+	req = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+
+	trace_cam_buf_done("ISP", ctx, req);
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	for (i = 0; i < done->num_handles; i++) {
+		for (j = 0; j < req_isp->num_fence_map_out; j++) {
+			if (done->resource_handle[i] ==
+				req_isp->fence_map_out[j].resource_handle)
+				break;
+		}
+
+		if (j == req_isp->num_fence_map_out) {
+			CAM_ERR(CAM_ISP,
+				"Can not find matching lane handle 0x%x!",
+				done->resource_handle[i]);
+			rc = -EINVAL;
+			continue;
+		}
+
+		if (req_isp->fence_map_out[j].sync_id == -1) {
+			__cam_isp_ctx_handle_buf_done_fail_log(req_isp);
+			continue;
+		}
+
+		if (!req_isp->bubble_detected) {
+			CAM_DBG(CAM_ISP,
+				"Sync with success: req %lld res 0x%x fd 0x%x",
+				req->request_id,
+				req_isp->fence_map_out[j].resource_handle,
+				req_isp->fence_map_out[j].sync_id);
+
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_SUCCESS);
+			if (rc)
+				CAM_DBG(CAM_ISP, "Sync failed with rc = %d",
+					 rc);
+		} else if (!req_isp->bubble_report) {
+			CAM_DBG(CAM_ISP,
+				"Sync with failure: req %lld res 0x%x fd 0x%x",
+				req->request_id,
+				req_isp->fence_map_out[j].resource_handle,
+				req_isp->fence_map_out[j].sync_id);
+
+			rc = cam_sync_signal(req_isp->fence_map_out[j].sync_id,
+				CAM_SYNC_STATE_SIGNALED_ERROR);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Sync failed with rc = %d",
+					rc);
+		} else {
+			/*
+			 * Ignore the buffer done if bubble detect is on
+			 * Increment the ack number here, and queue the
+			 * request back to pending list whenever all the
+			 * buffers are done.
+			 */
+			req_isp->num_acked++;
+			CAM_DBG(CAM_ISP,
+				"buf done with bubble state %d recovery %d",
+				bubble_state, req_isp->bubble_report);
+			continue;
+		}
+
+		CAM_DBG(CAM_ISP, "req %lld, reset sync id 0x%x",
+			req->request_id,
+			req_isp->fence_map_out[j].sync_id);
+		if (!rc) {
+			req_isp->num_acked++;
+			req_isp->fence_map_out[j].sync_id = -1;
+		}
+	}
+
+	if (req_isp->num_acked > req_isp->num_fence_map_out) {
+		/* Should not happen */
+		CAM_ERR(CAM_ISP,
+			"WARNING: req_id %lld num_acked %d > map_out %d",
+			req->request_id, req_isp->num_acked,
+			req_isp->num_fence_map_out);
+		WARN_ON(req_isp->num_acked > req_isp->num_fence_map_out);
+	}
+
+	if (req_isp->num_acked != req_isp->num_fence_map_out)
+		return rc;
+
+	ctx_isp->active_req_cnt--;
+
+	if (req_isp->bubble_detected && req_isp->bubble_report) {
+		req_isp->num_acked = 0;
+		req_isp->bubble_detected = false;
+		list_del_init(&req->list);
+		list_add(&req->list, &ctx->pending_req_list);
+
+		CAM_DBG(CAM_REQ,
+			"Move active request %lld to pending list(cnt = %d) [bubble recovery]",
+			 req->request_id, ctx_isp->active_req_cnt);
+	} else {
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+
+		CAM_DBG(CAM_REQ,
+			"Move active request %lld to free list(cnt = %d) [all fences done]",
+			 req->request_id, ctx_isp->active_req_cnt);
+	}
+
+end:
+	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+		CAM_ISP_STATE_CHANGE_TRIGGER_DONE,
+		ctx_isp->base->req_list->request_id);
+	return rc;
+}
+
+static void __cam_isp_ctx_send_sof_boot_timestamp(
+	struct cam_isp_context *ctx_isp, uint64_t request_id,
+	uint32_t sof_event_status)
+{
+	struct cam_req_mgr_message   req_msg;
+
+	req_msg.session_hdl = ctx_isp->base->session_hdl;
+	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
+	req_msg.u.frame_msg.request_id = request_id;
+	req_msg.u.frame_msg.timestamp = ctx_isp->boot_timestamp;
+	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
+	req_msg.u.frame_msg.sof_status = sof_event_status;
+
+	CAM_DBG(CAM_ISP,
+		"request id:%lld frame number:%lld boot time stamp:0x%llx",
+		 request_id, ctx_isp->frame_id,
+		 ctx_isp->boot_timestamp);
+
+	if (cam_req_mgr_notify_message(&req_msg,
+		V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS,
+		V4L_EVENT_CAM_REQ_MGR_EVENT))
+		CAM_ERR(CAM_ISP,
+			"Error in notifying the boot time for req id:%lld",
+			request_id);
+}
+
+
+static void __cam_isp_ctx_send_sof_timestamp(
+	struct cam_isp_context *ctx_isp, uint64_t request_id,
+	uint32_t sof_event_status)
+{
+	struct cam_req_mgr_message   req_msg;
+
+	req_msg.session_hdl = ctx_isp->base->session_hdl;
+	req_msg.u.frame_msg.frame_id = ctx_isp->frame_id;
+	req_msg.u.frame_msg.request_id = request_id;
+	req_msg.u.frame_msg.timestamp = ctx_isp->sof_timestamp_val;
+	req_msg.u.frame_msg.link_hdl = ctx_isp->base->link_hdl;
+	req_msg.u.frame_msg.sof_status = sof_event_status;
+
+	CAM_DBG(CAM_ISP,
+		"request id:%lld frame number:%lld SOF time stamp:0x%llx",
+		 request_id, ctx_isp->frame_id,
+		ctx_isp->sof_timestamp_val);
+	CAM_DBG(CAM_ISP, "sof status:%d", sof_event_status);
+
+	if (cam_req_mgr_notify_message(&req_msg,
+		V4L_EVENT_CAM_REQ_MGR_SOF, V4L_EVENT_CAM_REQ_MGR_EVENT))
+		CAM_ERR(CAM_ISP,
+			"Error in notifying the sof time for req id:%lld",
+			request_id);
+
+	__cam_isp_ctx_send_sof_boot_timestamp(ctx_isp,
+		request_id, sof_event_status);
+
+}
+
+static int __cam_isp_ctx_reg_upd_in_epoch_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	if (ctx_isp->frame_id == 1)
+		CAM_DBG(CAM_ISP, "Reg update for early PCR");
+	else
+		CAM_WARN(CAM_ISP,
+			"Unexpected reg update in activated substate:%d for frame_id:%lld",
+			ctx_isp->substate_activated, ctx_isp->frame_id);
+	return 0;
+}
+
+static int __cam_isp_ctx_reg_upd_in_activated_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request  *req;
+	struct cam_context      *ctx = ctx_isp->base;
+	struct cam_isp_ctx_req  *req_isp;
+
+	if (list_empty(&ctx->wait_req_list)) {
+		CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
+		goto end;
+	}
+	req = list_first_entry(&ctx->wait_req_list,
+			struct cam_ctx_request, list);
+	list_del_init(&req->list);
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+	if (req_isp->num_fence_map_out != 0) {
+		list_add_tail(&req->list, &ctx->active_req_list);
+		ctx_isp->active_req_cnt++;
+		CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
+			 req->request_id, ctx_isp->active_req_cnt);
+	} else {
+		/* no io config, so the request is completed. */
+		list_add_tail(&req->list, &ctx->free_req_list);
+		CAM_DBG(CAM_ISP,
+			"move active request %lld to free list(cnt = %d)",
+			 req->request_id, ctx_isp->active_req_cnt);
+	}
+
+	/*
+	 * This function only called directly from applied and bubble applied
+	 * state so change substate here.
+	 */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_notify_sof_in_activated_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_req_mgr_trigger_notify  notify;
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_ctx_request  *req;
+	uint64_t  request_id  = 0;
+
+	/*
+	 * Notify reqmgr with the SOF signal. Note that due to scheduling
+	 * delay we can run into a situation where two requests are already
+	 * in the active queue when we try to do the notification. In this
+	 * case, skip the current notification so the state machine can
+	 * catch up on the delay.
+	 */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
+		ctx_isp->active_req_cnt <= 2) {
+		if (ctx_isp->subscribe_event & CAM_TRIGGER_POINT_SOF) {
+			notify.link_hdl = ctx->link_hdl;
+			notify.dev_hdl = ctx->dev_hdl;
+			notify.frame_id = ctx_isp->frame_id;
+			notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+			ctx->ctx_crm_intf->notify_trigger(&notify);
+			CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
+				ctx_isp->frame_id);
+		}
+
+		list_for_each_entry(req, &ctx->active_req_list, list) {
+			if (req->request_id > ctx_isp->reported_req_id) {
+				request_id = req->request_id;
+				ctx_isp->reported_req_id = request_id;
+				break;
+			}
+		}
+
+		if (ctx_isp->substate_activated == CAM_ISP_CTX_ACTIVATED_BUBBLE)
+			request_id = 0;
+
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+static int __cam_isp_ctx_notify_eof_in_activated_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_req_mgr_trigger_notify  notify;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (!(ctx_isp->subscribe_event & CAM_TRIGGER_POINT_EOF))
+		return rc;
+
+	/* notify reqmgr with eof signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+		notify.trigger = CAM_TRIGGER_POINT_EOF;
+
+		ctx->ctx_crm_intf->notify_trigger(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM EOF frame %lld\n",
+			ctx_isp->frame_id);
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify EOF to CRM");
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_hw_error(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	return 0;
+}
+
+static int __cam_isp_ctx_sof_in_activated_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	struct cam_ctx_request *req;
+	struct cam_context *ctx = ctx_isp->base;
+
+	req = list_last_entry(&ctx->pending_req_list,
+		struct cam_ctx_request, list);
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "in valid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+		CAM_ISP_STATE_CHANGE_TRIGGER_SOF, req->request_id);
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_reg_upd_in_sof(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_ctx_request *req = NULL;
+	struct cam_isp_ctx_req *req_isp;
+	struct cam_context *ctx = ctx_isp->base;
+
+	if (ctx->state != CAM_CTX_ACTIVATED && ctx_isp->frame_id > 1) {
+		CAM_DBG(CAM_ISP, "invalid RUP");
+		goto end;
+	}
+
+	/*
+	 * This is for the first update. The initial setting will
+	 * cause the reg_upd in the first frame.
+	 */
+	if (!list_empty(&ctx->wait_req_list)) {
+		req = list_first_entry(&ctx->wait_req_list,
+			struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (req_isp->num_fence_map_out == req_isp->num_acked)
+			list_add_tail(&req->list, &ctx->free_req_list);
+		else
+			CAM_ERR(CAM_ISP,
+				"receive rup in unexpected state");
+	}
+	if (req != NULL) {
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
+			req->request_id);
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_epoch_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+	uint64_t  request_id = 0;
+
+	if (list_empty(&ctx->wait_req_list)) {
+		/*
+		 * If no wait req in epoch, this is an error case.
+		 * The recovery is to go back to sof state
+		 */
+		CAM_ERR(CAM_ISP, "No wait request");
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+		/* Send SOF event as empty frame*/
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	req_isp->bubble_detected = true;
+
+	CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
+			ctx_isp->frame_id);
+	} else {
+		req_isp->bubble_report = 0;
+	}
+
+	/*
+	 * Always move the request to active list. Let buf done
+	 * function handles the rest.
+	 */
+	CAM_DBG(CAM_REQ, "move request %lld to active list(cnt = %d)",
+		req->request_id, ctx_isp->active_req_cnt);
+	ctx_isp->active_req_cnt++;
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+
+	if (req->request_id > ctx_isp->reported_req_id) {
+		request_id = req->request_id;
+		ctx_isp->reported_req_id = request_id;
+	}
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_ERROR);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+end:
+	if ((request_id == 0) && !list_empty(&ctx->active_req_list)) {
+		req = list_last_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
+	} else {
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, request_id);
+	}
+	return 0;
+}
+
+
+static int __cam_isp_ctx_buf_done_in_applied(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+
+static int __cam_isp_ctx_sof_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_context                    *ctx = ctx_isp->base;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	struct cam_ctx_request *req;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "in valid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+
+	if (list_empty(&ctx->active_req_list))
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	else
+		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+	req = list_last_entry(&ctx->active_req_list,
+		struct cam_ctx_request, list);
+	if (req)
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
+			ctx->req_list->request_id);
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_buf_done_in_epoch(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 0);
+	return rc;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	return rc;
+}
+
+static int __cam_isp_ctx_epoch_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+	uint64_t  request_id = 0;
+
+	/*
+	 * This means we missed the reg upd ack. So we need to
+	 * transition to BUBBLE state again.
+	 */
+
+	if (list_empty(&ctx->wait_req_list)) {
+		/*
+		 * If no pending req in epoch, this is an error case.
+		 * Just go back to the bubble state.
+		 */
+		CAM_ERR(CAM_ISP, "No pending request.");
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	req_isp->bubble_detected = true;
+
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CAM_DBG(CAM_REQ,
+			"Notify CRM about Bubble req_id %llu frame %lld",
+			req->request_id, ctx_isp->frame_id);
+	} else {
+		req_isp->bubble_report = 0;
+	}
+
+	/*
+	 * Always move the request to active list. Let buf done
+	 * function handles the rest.
+	 */
+	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+		req->request_id, ctx_isp->active_req_cnt);
+	ctx_isp->active_req_cnt++;
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+
+	if (!req_isp->bubble_report) {
+		if (req->request_id > ctx_isp->reported_req_id) {
+			request_id = req->request_id;
+			ctx_isp->reported_req_id = request_id;
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_ERROR);
+		} else
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+end:
+	req = list_last_entry(&ctx->active_req_list, struct cam_ctx_request,
+		list);
+	if (req)
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH, req->request_id);
+	return 0;
+}
+
+static int __cam_isp_ctx_buf_done_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_hw_done_event_data *done =
+		(struct cam_isp_hw_done_event_data *) evt_data;
+
+	rc = __cam_isp_ctx_handle_buf_done_in_activated_state(ctx_isp, done, 1);
+	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+		CAM_ISP_STATE_CHANGE_TRIGGER_DONE,
+		ctx_isp->base->req_list->request_id);
+	return rc;
+}
+
+static int __cam_isp_ctx_handle_error(struct cam_isp_context *ctx_isp,
+	void *evt_data)
+{
+	int                              rc = 0;
+	uint32_t                         i = 0;
+	bool                             found = false;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_to_report = NULL;
+	struct cam_ctx_request          *req_to_dump = NULL;
+	struct cam_ctx_request          *req_temp;
+	struct cam_isp_ctx_req          *req_isp = NULL;
+	struct cam_isp_ctx_req          *req_isp_to_report = NULL;
+	struct cam_req_mgr_error_notify  notify;
+	uint64_t                         error_request_id;
+	struct cam_hw_fence_map_entry   *fence_map_out = NULL;
+
+	struct cam_context *ctx = ctx_isp->base;
+	struct cam_isp_hw_error_event_data  *error_event_data =
+			(struct cam_isp_hw_error_event_data *)evt_data;
+
+	uint32_t error_type = error_event_data->error_type;
+
+	CAM_DBG(CAM_ISP, "Enter error_type = %d", error_type);
+	if ((error_type == CAM_ISP_HW_ERROR_OVERFLOW) ||
+		(error_type == CAM_ISP_HW_ERROR_BUSIF_OVERFLOW))
+		notify.error = CRM_KMD_ERR_OVERFLOW;
+
+	/*
+	 * The error is likely caused by the first request on the active
+	 * list. If the active list is empty, check the wait list (the
+	 * error may have hit just as RUP arrived and we handle the error
+	 * before RUP).
+	 */
+	if (list_empty(&ctx->active_req_list)) {
+		CAM_DBG(CAM_ISP,
+			"handling error with no active request");
+		if (list_empty(&ctx->wait_req_list)) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"Error with no active/wait request");
+			goto end;
+		} else {
+			req_to_dump = list_first_entry(&ctx->wait_req_list,
+				struct cam_ctx_request, list);
+		}
+	} else {
+		req_to_dump = list_first_entry(&ctx->active_req_list,
+			struct cam_ctx_request, list);
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req_to_dump->req_priv;
+	cam_isp_ctx_dump_req(req_isp);
+
+	__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+		CAM_ISP_STATE_CHANGE_TRIGGER_ERROR, req_to_dump->request_id);
+
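+	/*
+	 * Signal fences with error and free every active request that has
+	 * no bubble recovery enabled; stop at the first one requesting it.
+	 */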
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->active_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (!req_isp->bubble_report) {
+			for (i = 0; i < req_isp->num_fence_map_out; i++) {
+				fence_map_out =
+					&req_isp->fence_map_out[i];
+				CAM_ERR(CAM_ISP, "req %llu, Sync fd %x",
+					req->request_id,
+					req_isp->fence_map_out[i].sync_id);
+				if (req_isp->fence_map_out[i].sync_id != -1) {
+					rc = cam_sync_signal(
+						fence_map_out->sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+					fence_map_out->sync_id = -1;
+				}
+			}
+			list_del_init(&req->list);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			ctx_isp->active_req_cnt--;
+		} else {
+			found = true;
+			break;
+		}
+	}
+
+	if (found)
+		goto move_to_pending;
+
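+	/* Repeat the same fence clean-up for the wait list */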
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->wait_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		if (!req_isp->bubble_report) {
+			for (i = 0; i < req_isp->num_fence_map_out; i++) {
+				fence_map_out =
+					&req_isp->fence_map_out[i];
+				CAM_ERR(CAM_ISP, "req %llu, Sync fd %x",
+					req->request_id,
+					req_isp->fence_map_out[i].sync_id);
+				if (req_isp->fence_map_out[i].sync_id != -1) {
+					rc = cam_sync_signal(
+						fence_map_out->sync_id,
+						CAM_SYNC_STATE_SIGNALED_ERROR);
+					fence_map_out->sync_id = -1;
+				}
+			}
+			list_del_init(&req->list);
+			list_add_tail(&req->list, &ctx->free_req_list);
+			ctx_isp->active_req_cnt--;
+		} else {
+			found = true;
+			break;
+		}
+	}
+
+move_to_pending:
+	/*
+	 * If bubble recovery is enabled on any request, we need to move that
+	 * request and all the subsequent requests to the pending list.
+	 * Note:
+	 * We need to traverse the active list in reverse order and add
+	 * each request to the head of the pending list.
+	 * e.g. pending current state: 10, 11 | active current state: 8, 9
+	 * intermediate for-loop iteration - pending: 9, 10, 11 | active: 8
+	 * final state - pending: 8, 9, 10, 11 | active: NULL
+	 */
+	if (found) {
+		list_for_each_entry_safe_reverse(req, req_temp,
+			&ctx->active_req_list, list) {
+			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			ctx_isp->active_req_cnt--;
+		}
+		list_for_each_entry_safe_reverse(req, req_temp,
+			&ctx->wait_req_list, list) {
+			req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+			list_del_init(&req->list);
+			list_add(&req->list, &ctx->pending_req_list);
+			ctx_isp->active_req_cnt--;
+		}
+	}
+
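+	/*
+	 * Walk the pending list up to the last applied request: signal and
+	 * free requests that did not ask for bubble recovery, and remember
+	 * the first one that did so it can be reported to CRM below.
+	 */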
+	do {
+		if (list_empty(&ctx->pending_req_list)) {
+			error_request_id = ctx_isp->last_applied_req_id + 1;
+			req_isp = NULL;
+			break;
+		}
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		error_request_id = ctx_isp->last_applied_req_id;
+
+		if (req_isp->bubble_report) {
+			req_to_report = req;
+			req_isp_to_report = req_to_report->req_priv;
+			break;
+		}
+
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1)
+				rc = cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			req_isp->fence_map_out[i].sync_id = -1;
+		}
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->free_req_list);
+
+	} while (req->request_id < ctx_isp->last_applied_req_id);
+
+end:
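+	/*
+	 * Report the error to CRM: a bubble when the reported request asked
+	 * for bubble recovery and recovery is enabled, fatal when it asked
+	 * for none.
+	 */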
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_err) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = error_request_id;
+
+		if (req_isp_to_report && req_isp_to_report->bubble_report) {
+			if (error_event_data->recovery_enabled)
+				notify.error = CRM_KMD_ERR_BUBBLE;
+		} else {
+			notify.error = CRM_KMD_ERR_FATAL;
+		}
+
+		CAM_WARN(CAM_ISP, "Notify CRM: req %lld, frame %lld\n",
+			error_request_id, ctx_isp->frame_id);
+
+		ctx->ctx_crm_intf->notify_err(&notify);
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HW_ERROR;
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify ERRROR to CRM");
+		rc = -EFAULT;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit");
+
+	return rc;
+}
+
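+/*
+ * Per-substate IRQ dispatch table for sessions with PIX (or PIX and RDI)
+ * resources. The irq_ops slots are indexed by hw event id: error, SOF,
+ * reg update, epoch, EOF and buf done.
+ */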
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_activated_state_machine_irq[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_sof,
+			__cam_isp_ctx_notify_sof_in_activated_state,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_applied,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_epoch,
+			__cam_isp_ctx_reg_upd_in_epoch_state,
+			__cam_isp_ctx_notify_sof_in_activated_state,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			NULL,
+			__cam_isp_ctx_notify_sof_in_activated_state,
+			__cam_isp_ctx_notify_eof_in_activated_state,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* Bubble Applied */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_activated_state,
+			__cam_isp_ctx_epoch_in_bubble_applied,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+	/* HW ERROR */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_sof_in_activated_state,
+			__cam_isp_ctx_reg_upd_in_hw_error,
+			NULL,
+			NULL,
+			NULL,
+		},
+	},
+	/* HALT */
+	{
+	},
+};
+
+static int __cam_isp_ctx_apply_req_in_activated_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply,
+	uint32_t next_state)
+{
+	int rc = 0;
+	struct cam_ctx_request          *req;
+	struct cam_ctx_request          *active_req = NULL;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_ctx_req          *active_req_isp;
+	struct cam_isp_context          *ctx_isp = NULL;
+	struct cam_hw_config_args        cfg;
+
+	if (list_empty(&ctx->pending_req_list)) {
+		CAM_ERR(CAM_ISP, "No available request for Apply id %lld",
+			apply->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	/*
+	 * When the pipeline has an issue, requests can queue up in the
+	 * pipeline. In this case, we should reject the additional request.
+	 * The maximum number of requests allowed to be outstanding is 2.
+	 */
+	ctx_isp = (struct cam_isp_context *) ctx->ctx_priv;
+
+	spin_lock_bh(&ctx->lock);
+	req = list_first_entry(&ctx->pending_req_list, struct cam_ctx_request,
+		list);
+	spin_unlock_bh(&ctx->lock);
+
+	/*
+	 * Check whether the request id matches the tip of the pending list;
+	 * if not, we are in the middle of error handling and must reject
+	 * this apply
+	 */
+	if (req->request_id != apply->request_id) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Invalid Request Id asking %llu existing %llu",
+			apply->request_id, req->request_id);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_REQ, "Apply request %lld in substate %d", req->request_id,
+		ctx_isp->substate_activated);
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	if (ctx_isp->active_req_cnt >= 2) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Reject apply request (id %lld) due to congestion (cnt = %d)",
+			req->request_id,
+			ctx_isp->active_req_cnt);
+
+		spin_lock_bh(&ctx->lock);
+		if (!list_empty(&ctx->active_req_list))
+			active_req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		else
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"WARNING: should not happen (cnt = %d) but active_list empty",
+				ctx_isp->active_req_cnt);
+		spin_unlock_bh(&ctx->lock);
+
+		if (active_req) {
+			active_req_isp =
+				(struct cam_isp_ctx_req *) active_req->req_priv;
+			__cam_isp_ctx_handle_buf_done_fail_log(active_req_isp);
+		}
+
+		rc = -EFAULT;
+		goto end;
+	}
+	req_isp->bubble_report = apply->report_if_bubble;
+
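+	/* Package this request's hw update entries for the hw manager */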
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.request_id = req->request_id;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.num_hw_update_entries = req_isp->num_cfg;
+	cfg.priv  = &req_isp->hw_update_data;
+	cfg.init_packet = 0;
+
+	rc = ctx->hw_mgr_intf->hw_config(ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not apply the configuration");
+	} else {
+		spin_lock_bh(&ctx->lock);
+		ctx_isp->substate_activated = next_state;
+		ctx_isp->last_applied_req_id = apply->request_id;
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &ctx->wait_req_list);
+		CAM_DBG(CAM_ISP, "new substate state %d, applied req %lld",
+			next_state, ctx_isp->last_applied_req_id);
+		spin_unlock_bh(&ctx->lock);
+	}
+end:
+	if (ctx_isp != NULL) {
+		__cam_isp_ctx_update_state_monitor_array(ctx_isp,
+			CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
+			ctx->req_list->request_id);
+	}
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_sof(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_epoch(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req_in_bubble(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_flush_req(struct cam_context *ctx,
+	struct list_head *req_list, struct cam_req_mgr_flush_request *flush_req)
+{
+	int i, rc;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_ctx_request           *req;
+	struct cam_ctx_request           *req_temp;
+	struct cam_isp_ctx_req           *req_isp;
+	struct list_head                  flush_list;
+
+	INIT_LIST_HEAD(&flush_list);
+	if (list_empty(req_list)) {
+		CAM_DBG(CAM_ISP, "request list is empty");
+		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+			CAM_ERR(CAM_ISP, "no request to cancel");
+			return -EINVAL;
+		} else
+			return 0;
+	}
+
+	CAM_DBG(CAM_REQ, "Flush [%u] in progress for req_id %llu",
+		flush_req->type, flush_req->req_id);
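+	/*
+	 * Move the requests to be flushed onto a local list first: a cancel
+	 * request moves only the matching request id, while any other flush
+	 * type moves every request on the input list.
+	 */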
+	list_for_each_entry_safe(req, req_temp, req_list, list) {
+		if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+			if (req->request_id != flush_req->req_id) {
+				continue;
+			} else {
+				list_del_init(&req->list);
+				list_add_tail(&req->list, &flush_list);
+				cancel_req_id_found = 1;
+				break;
+			}
+		}
+		list_del_init(&req->list);
+		list_add_tail(&req->list, &flush_list);
+	}
+
+	list_for_each_entry_safe(req, req_temp, &flush_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		for (i = 0; i < req_isp->num_fence_map_out; i++) {
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				CAM_DBG(CAM_ISP, "Flush req 0x%llx, fence %d",
+					 req->request_id,
+					req_isp->fence_map_out[i].sync_id);
+				rc = cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc)
+					CAM_ERR_RATE_LIMIT(CAM_ISP,
+						"signal fence failed\n");
+				req_isp->fence_map_out[i].sync_id = -1;
+			}
+		}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CAM_DBG(CAM_ISP,
+			"Flush request id:%lld is not found in the list",
+			flush_req->req_id);
+
+	return 0;
+}
+
+static int __cam_isp_ctx_flush_req_in_top_state(
+	struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush_req)
+{
+	struct cam_isp_context           *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_isp_stop_args          stop_isp;
+	struct cam_hw_stop_args           stop_args;
+	struct cam_isp_start_args         start_isp;
+	int rc = 0;
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		CAM_INFO(CAM_ISP, "Last request id to flush is %lld",
+			flush_req->req_id);
+		ctx->last_flush_req = flush_req->req_id;
+	}
+
+	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+	spin_unlock_bh(&ctx->lock);
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		/* if active and wait list are empty, return */
+		spin_lock_bh(&ctx->lock);
+		if ((list_empty(&ctx->wait_req_list)) &&
+			(list_empty(&ctx->active_req_list))) {
+			spin_unlock_bh(&ctx->lock);
+			CAM_DBG(CAM_ISP, "active and wait list are empty");
+			goto end;
+		}
+		spin_unlock_bh(&ctx->lock);
+
+		/* Stop hw first before active list flush */
+		stop_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY;
+		stop_isp.stop_only = true;
+		stop_args.args = (void *)&stop_isp;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+				&stop_args);
+
+		spin_lock_bh(&ctx->lock);
+		CAM_DBG(CAM_ISP, "try to flush wait list");
+		rc = __cam_isp_ctx_flush_req(ctx, &ctx->wait_req_list,
+			flush_req);
+		CAM_DBG(CAM_ISP, "try to flush active list");
+		rc = __cam_isp_ctx_flush_req(ctx, &ctx->active_req_list,
+			flush_req);
+		ctx_isp->active_req_cnt = 0;
+		spin_unlock_bh(&ctx->lock);
+
+		/* Start hw */
+		start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		start_isp.start_only = true;
+		start_isp.hw_config.priv = NULL;
+
+		rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+			&start_isp);
+	}
+
+end:
+	CAM_DBG(CAM_ISP, "Flush request in top state %d",
+		 ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_flush_req_in_ready(
+	struct cam_context *ctx,
+	struct cam_req_mgr_flush_request *flush_req)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, flush_req);
+
+	/* if nothing is in pending req list, change state to acquire */
+	if (list_empty(&ctx->pending_req_list))
+		ctx->state = CAM_CTX_ACQUIRED;
+	spin_unlock_bh(&ctx->lock);
+
+	trace_cam_context_state("ISP", ctx);
+
+	CAM_DBG(CAM_ISP, "Flush request in ready state. next state %d",
+		 ctx->state);
+	return rc;
+}
+
+static struct cam_ctx_ops
+	cam_isp_ctx_activated_state_machine[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_sof,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_epoch,
+		},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_apply_req_in_bubble,
+		},
+		.irq_ops = NULL,
+	},
+	/* Bubble Applied */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HW ERROR */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+static int __cam_isp_ctx_rdi_only_sof_in_top_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	int rc = 0;
+	struct cam_context                    *ctx = ctx_isp->base;
+	struct cam_req_mgr_trigger_notify      notify;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	uint64_t                               request_id  = 0;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "in valid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	/*
+	 * Notify reqmgr with the sof signal. Note that due to scheduling
+	 * delays we can run into a situation where two active requests are
+	 * already in the active queue while we try to do the notification.
+	 * In this case, we need to skip the current notification. This
+	 * helps the state machine catch up on the delay.
+	 */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger &&
+		ctx_isp->active_req_cnt <= 2) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+		notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+		ctx->ctx_crm_intf->notify_trigger(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
+			ctx_isp->frame_id);
+
+		/*
+		 * This is an idle frame without any applied request id, so
+		 * send request id as zero
+		 */
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Can not notify SOF to CRM");
+	}
+
+	if (list_empty(&ctx->active_req_list))
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	else
+		CAM_DBG(CAM_ISP, "Still need to wait for the buf done");
+
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+	return rc;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_applied_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "in valid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED;
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+	return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_applied(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request    *req;
+	struct cam_isp_ctx_req    *req_isp;
+	struct cam_context        *ctx = ctx_isp->base;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	uint64_t  request_id = 0;
+
+	/*
+	 * SOF in bubble applied state means the reg update was not received.
+	 * Before incrementing the frame id and overriding the time stamp
+	 * value, send the previous sof time stamp that was captured in the
+	 * applied state.
+	 */
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+
+	if (list_empty(&ctx->wait_req_list)) {
+		/*
+		 * If there is no waiting request, this is an error case.
+		 * The recovery is to go back to the sof state
+		 */
+		CAM_ERR(CAM_ISP, "No wait request");
+		ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+		/* Send SOF event as empty frame */
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+		goto end;
+	}
+
+	req = list_first_entry(&ctx->wait_req_list, struct cam_ctx_request,
+		list);
+	req_isp = (struct cam_isp_ctx_req *)req->req_priv;
+	req_isp->bubble_detected = true;
+
+	CAM_DBG(CAM_ISP, "Report Bubble flag %d", req_isp->bubble_report);
+	if (req_isp->bubble_report && ctx->ctx_crm_intf &&
+		ctx->ctx_crm_intf->notify_err) {
+		struct cam_req_mgr_error_notify notify;
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.req_id = req->request_id;
+		notify.error = CRM_KMD_ERR_BUBBLE;
+		ctx->ctx_crm_intf->notify_err(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM about Bubble frame %lld",
+			ctx_isp->frame_id);
+	} else {
+		req_isp->bubble_report = 0;
+	}
+
+	/*
+	 * Always move the request to the active list. Let the buf done
+	 * function handle the rest.
+	 */
+	ctx_isp->active_req_cnt++;
+	list_del_init(&req->list);
+	list_add_tail(&req->list, &ctx->active_req_list);
+	CAM_DBG(CAM_ISP, "move request %lld to active list(cnt = %d)",
+			req->request_id, ctx_isp->active_req_cnt);
+
+	if (!req_isp->bubble_report) {
+		if (req->request_id > ctx_isp->reported_req_id) {
+			request_id = req->request_id;
+			ctx_isp->reported_req_id = request_id;
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+				CAM_REQ_MGR_SOF_EVENT_ERROR);
+		} else
+			__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+				CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	} else
+		__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+			CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	/* change the state to bubble, as reg update has not come */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_BUBBLE;
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+end:
+	return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_sof_in_bubble_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	uint32_t i;
+	struct cam_ctx_request                *req;
+	struct cam_context                    *ctx = ctx_isp->base;
+	struct cam_req_mgr_trigger_notify      notify;
+	struct cam_isp_hw_sof_event_data      *sof_event_data = evt_data;
+	struct cam_isp_ctx_req                *req_isp;
+	uint64_t                               request_id  = 0;
+
+	if (!evt_data) {
+		CAM_ERR(CAM_ISP, "in valid sof event data");
+		return -EINVAL;
+	}
+
+	ctx_isp->frame_id++;
+	ctx_isp->sof_timestamp_val = sof_event_data->timestamp;
+	ctx_isp->boot_timestamp = sof_event_data->boot_time;
+	CAM_DBG(CAM_ISP, "frame id: %lld time stamp:0x%llx",
+		ctx_isp->frame_id, ctx_isp->sof_timestamp_val);
+	/*
+	 * Signal all active requests with error and move all the active
+	 * requests to the free list
+	 */
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+			req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+		ctx_isp->active_req_cnt--;
+	}
+
+	/* notify reqmgr with sof signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+		notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+		ctx->ctx_crm_intf->notify_trigger(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
+			ctx_isp->frame_id);
+
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+	}
+
+	/*
+	 * This is an idle frame without any applied request id, so send
+	 * request id as zero
+	 */
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+	CAM_DBG(CAM_ISP, "next substate %d",
+		ctx_isp->substate_activated);
+
+	return 0;
+}
+
+static int __cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state(
+	struct cam_isp_context *ctx_isp, void *evt_data)
+{
+	struct cam_ctx_request  *req;
+	struct cam_context      *ctx = ctx_isp->base;
+	struct cam_isp_ctx_req  *req_isp;
+	struct cam_req_mgr_trigger_notify  notify;
+	uint64_t  request_id  = 0;
+
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_EPOCH;
+	/* Notify reqmgr with the sof signal */
+	if (ctx->ctx_crm_intf && ctx->ctx_crm_intf->notify_trigger) {
+		if (list_empty(&ctx->wait_req_list)) {
+			CAM_ERR(CAM_ISP, "Reg upd ack with no waiting request");
+			goto error;
+		}
+		req = list_first_entry(&ctx->wait_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
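+		/* An INIT packet carries no frame result, so report id 0 */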
+		request_id =
+			(req_isp->hw_update_data.packet_opcode_type ==
+				CAM_ISP_PACKET_INIT_DEV) ?
+			0 : req->request_id;
+
+		if (req_isp->num_fence_map_out != 0) {
+			list_add_tail(&req->list, &ctx->active_req_list);
+			ctx_isp->active_req_cnt++;
+			CAM_DBG(CAM_ISP,
+				"move request %lld to active list(cnt = %d)",
+				req->request_id, ctx_isp->active_req_cnt);
+			/* if packet has buffers, set correct request id */
+			request_id = req->request_id;
+		} else {
+			/* no io config, so the request is completed. */
+			list_add_tail(&req->list, &ctx->free_req_list);
+			CAM_DBG(CAM_ISP,
+				"move request %lld to free list(cnt = %d)",
+				req->request_id, ctx_isp->active_req_cnt);
+		}
+
+		notify.link_hdl = ctx->link_hdl;
+		notify.dev_hdl = ctx->dev_hdl;
+		notify.frame_id = ctx_isp->frame_id;
+		notify.trigger = CAM_TRIGGER_POINT_SOF;
+
+		ctx->ctx_crm_intf->notify_trigger(&notify);
+		CAM_DBG(CAM_ISP, "Notify CRM  SOF frame %lld",
+			ctx_isp->frame_id);
+	} else {
+		CAM_ERR(CAM_ISP, "Can not notify SOF to CRM");
+	}
+	if (request_id)
+		ctx_isp->reported_req_id = request_id;
+
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+	return 0;
+error:
+	/* Send SOF event as idle frame */
+	__cam_isp_ctx_send_sof_timestamp(ctx_isp, request_id,
+		CAM_REQ_MGR_SOF_EVENT_SUCCESS);
+
+	/*
+	 * There is no request in the wait list; move the sub state machine
+	 * back to the SOF sub state
+	 */
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+
+	return 0;
+}
+
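+/*
+ * Per-substate IRQ dispatch table for rdi only sessions, indexed the same
+ * way as the PIX table above.
+ */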
+static struct cam_isp_ctx_irq_ops
+	cam_isp_ctx_rdi_only_activated_state_machine_irq
+			[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.irq_ops = {
+			NULL,
+			__cam_isp_ctx_rdi_only_sof_in_top_state,
+			__cam_isp_ctx_reg_upd_in_sof,
+			NULL,
+			NULL,
+			NULL,
+		},
+	},
+	/* APPLIED */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_applied_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_applied,
+		},
+	},
+	/* EPOCH */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_top_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_epoch,
+		},
+	},
+	/* BUBBLE*/
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_bubble_state,
+			NULL,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble,
+		},
+	},
+	/* BUBBLE APPLIED, i.e. PRE_BUBBLE */
+	{
+		.irq_ops = {
+			__cam_isp_ctx_handle_error,
+			__cam_isp_ctx_rdi_only_sof_in_bubble_applied,
+			__cam_isp_ctx_rdi_only_reg_upd_in_bubble_applied_state,
+			NULL,
+			NULL,
+			__cam_isp_ctx_buf_done_in_bubble_applied,
+		},
+	},
+	/* HW ERROR */
+	{
+	},
+	/* HALT */
+	{
+	},
+};
+
+static int __cam_isp_ctx_rdi_only_apply_req_top_state(
+	struct cam_context *ctx, struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "current substate %d",
+		ctx_isp->substate_activated);
+	rc = __cam_isp_ctx_apply_req_in_activated_state(ctx, apply,
+		CAM_ISP_CTX_ACTIVATED_APPLIED);
+	CAM_DBG(CAM_ISP, "new substate %d", ctx_isp->substate_activated);
+
+	return rc;
+}
+
+static struct cam_ctx_ops
+	cam_isp_ctx_rdi_only_activated_state_machine
+		[CAM_ISP_CTX_ACTIVATED_MAX] = {
+	/* SOF */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+		},
+		.irq_ops = NULL,
+	},
+	/* APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* EPOCH */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {
+			.apply_req = __cam_isp_ctx_rdi_only_apply_req_top_state,
+		},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* BUBBLE APPLIED */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HW ERROR */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* HALT */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+};
+
+/* top level state machine */
+static int __cam_isp_ctx_release_dev_in_top_state(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_release_args       rel_arg;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_req_mgr_flush_request flush_req;
+
+	if (ctx->link_hdl != -1) {
+		CAM_ERR(CAM_ISP, "ctx expects release dev after unlink");
+		rc = -EAGAIN;
+		return rc;
+	}
+
+	if (cmd && ctx_isp->hw_ctx && ctx_isp->split_acquire) {
+		CAM_ERR(CAM_ISP, "ctx expects release HW before release dev");
+		return -EPERM;
+	}
+
+	if (ctx_isp->hw_ctx) {
+		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&rel_arg);
+		ctx_isp->hw_ctx = NULL;
+	}
+
+	ctx->session_hdl = -1;
+	ctx->dev_hdl = -1;
+	ctx->link_hdl = -1;
+	ctx->ctx_crm_intf = NULL;
+	ctx_isp->frame_id = 0;
+	ctx_isp->active_req_cnt = 0;
+	ctx_isp->reported_req_id = 0;
+	ctx_isp->hw_acquired = false;
+	ctx_isp->init_received = false;
+
+	/*
+	 * Ideally, we should never have any active request here, but we
+	 * still add a sanity check to help debugging
+	 */
+	if (!list_empty(&ctx->active_req_list))
+		CAM_ERR(CAM_ISP, "Active list is not empty");
+
+	/* Flush the pending request list */
+	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
+	flush_req.link_hdl = ctx->link_hdl;
+	flush_req.dev_hdl = ctx->dev_hdl;
+
+	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
+	spin_unlock_bh(&ctx->lock);
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	trace_cam_context_state("ISP", ctx);
+	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
+		ctx->ctx_id, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_release_hw_in_top_state(struct cam_context *ctx,
+	void *cmd)
+{
+	int rc = 0;
+	struct cam_hw_release_args       rel_arg;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_req_mgr_flush_request flush_req;
+
+	if (ctx_isp->hw_ctx) {
+		rel_arg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&rel_arg);
+		ctx_isp->hw_ctx = NULL;
+	} else {
+		CAM_ERR(CAM_ISP, "No hw resources acquired for this ctx");
+	}
+
+	ctx_isp->frame_id = 0;
+	ctx_isp->active_req_cnt = 0;
+	ctx_isp->reported_req_id = 0;
+	ctx_isp->hw_acquired = false;
+	ctx_isp->init_received = false;
+
+	/*
+	 * Ideally, we should never have any active request here, but we
+	 * still add a sanity check to help debugging
+	 */
+	if (!list_empty(&ctx->active_req_list))
+		CAM_WARN(CAM_ISP, "Active list is not empty");
+
+	/* Flush the pending request list */
+	flush_req.type = CAM_REQ_MGR_FLUSH_TYPE_ALL;
+	flush_req.link_hdl = ctx->link_hdl;
+	flush_req.dev_hdl = ctx->dev_hdl;
+
+	CAM_DBG(CAM_ISP, "try to flush pending list");
+	spin_lock_bh(&ctx->lock);
+	rc = __cam_isp_ctx_flush_req(ctx, &ctx->pending_req_list, &flush_req);
+	spin_unlock_bh(&ctx->lock);
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	trace_cam_context_state("ISP", ctx);
+	CAM_DBG(CAM_ISP, "Release device success[%u] next state %d",
+		ctx->ctx_id, ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_top_state(
+	struct cam_context *ctx, struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0, i;
+	struct cam_ctx_request           *req = NULL;
+	struct cam_isp_ctx_req           *req_isp;
+	uintptr_t                         packet_addr;
+	struct cam_packet                *packet;
+	size_t                            len = 0;
+	struct cam_hw_prepare_update_args cfg;
+	struct cam_req_mgr_add_request    add_req;
+	struct cam_isp_context           *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "get free request object......");
+
+	/* get free request */
+	spin_lock_bh(&ctx->lock);
+	if (!list_empty(&ctx->free_req_list)) {
+		req = list_first_entry(&ctx->free_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+	}
+	spin_unlock_bh(&ctx->lock);
+
+	if (!req) {
+		CAM_ERR(CAM_ISP, "No more request obj free");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	/* for config dev, only memory handle is supported */
+	/* map packet from the memhandle */
+	rc = cam_mem_get_cpu_buf((int32_t) cmd->packet_handle,
+		&packet_addr, &len);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Can not get packet address");
+		rc = -EINVAL;
+		goto free_req;
+	}
+
+	packet = (struct cam_packet *)(packet_addr + (uint32_t)cmd->offset);
+	CAM_DBG(CAM_ISP, "pack_handle %llx", cmd->packet_handle);
+	CAM_DBG(CAM_ISP, "packet address is 0x%zx", packet_addr);
+	CAM_DBG(CAM_ISP, "packet with length %zu, offset 0x%llx",
+		len, cmd->offset);
+	CAM_DBG(CAM_ISP, "Packet request id %lld",
+		packet->header.request_id);
+	CAM_DBG(CAM_ISP, "Packet size 0x%x", packet->header.size);
+	CAM_DBG(CAM_ISP, "packet op %d", packet->header.op_code);
+
+	if (packet->header.request_id <= ctx->last_flush_req) {
+		CAM_INFO(CAM_ISP,
+			"request %lld has been flushed, reject packet",
+			packet->header.request_id);
+		rc = -EINVAL;
+		goto free_req;
+	}
+
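+	/* A request newer than the last flushed one clears the flush marker */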
+	if (packet->header.request_id > ctx->last_flush_req)
+		ctx->last_flush_req = 0;
+
+	/* preprocess the configuration */
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.packet = packet;
+	cfg.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	cfg.max_hw_update_entries = CAM_ISP_CTX_CFG_MAX;
+	cfg.hw_update_entries = req_isp->cfg;
+	cfg.max_out_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.max_in_map_entries = CAM_ISP_CTX_RES_MAX;
+	cfg.out_map_entries = req_isp->fence_map_out;
+	cfg.in_map_entries = req_isp->fence_map_in;
+	cfg.priv  = &req_isp->hw_update_data;
+	cfg.pf_data = &(req->pf_data);
+
+	CAM_DBG(CAM_ISP, "try to prepare config packet......");
+
+	rc = ctx->hw_mgr_intf->hw_prepare_update(
+		ctx->hw_mgr_intf->hw_mgr_priv, &cfg);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Prepare config packet failed in HW layer");
+		rc = -EFAULT;
+		goto free_req;
+	}
+	req_isp->num_cfg = cfg.num_hw_update_entries;
+	req_isp->num_fence_map_out = cfg.num_out_map_entries;
+	req_isp->num_fence_map_in = cfg.num_in_map_entries;
+	req_isp->num_acked = 0;
+	req_isp->bubble_detected = false;
+
+	for (i = 0; i < req_isp->num_fence_map_out; i++) {
+		rc = cam_sync_get_obj_ref(req_isp->fence_map_out[i].sync_id);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can't get ref for fence %d",
+				req_isp->fence_map_out[i].sync_id);
+			goto put_ref;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "num_entry: %d, num fence out: %d, num fence in: %d",
+		req_isp->num_cfg, req_isp->num_fence_map_out,
+		req_isp->num_fence_map_in);
+
+	req->request_id = packet->header.request_id;
+	req->status = 1;
+
+	CAM_DBG(CAM_ISP, "Packet request id %lld packet opcode:%d",
+		packet->header.request_id,
+		req_isp->hw_update_data.packet_opcode_type);
+
+	if (req_isp->hw_update_data.packet_opcode_type ==
+		CAM_ISP_PACKET_INIT_DEV) {
+		if (ctx->state < CAM_CTX_ACTIVATED) {
+			rc = __cam_isp_ctx_enqueue_init_request(ctx, req);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Enqueue INIT pkt failed");
+			ctx_isp->init_received = true;
+		} else {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ISP, "Recevied INIT pkt in wrong state");
+		}
+	} else {
+		if (ctx->state >= CAM_CTX_READY && ctx->ctx_crm_intf->add_req) {
+			add_req.link_hdl = ctx->link_hdl;
+			add_req.dev_hdl  = ctx->dev_hdl;
+			add_req.req_id   = req->request_id;
+			add_req.skip_before_applying = 0;
+			rc = ctx->ctx_crm_intf->add_req(&add_req);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "Add req failed: req id=%llu",
+					req->request_id);
+			} else {
+				__cam_isp_ctx_enqueue_request_in_order(
+					ctx, req);
+			}
+		} else {
+			rc = -EINVAL;
+			CAM_ERR(CAM_ISP, "Recevied Update in wrong state");
+		}
+	}
+	if (rc)
+		goto put_ref;
+
+	CAM_DBG(CAM_REQ,
+		"Preprocessing Config req_id %lld successful on ctx %u",
+		req->request_id, ctx->ctx_id);
+
+	return rc;
+
+put_ref:
+	for (--i; i >= 0; i--) {
+		rc = cam_sync_put_obj_ref(req_isp->fence_map_out[i].sync_id);
+		if (rc)
+			CAM_ERR(CAM_CTXT, "Failed to put ref of fence %d",
+				req_isp->fence_map_out[i].sync_id);
+	}
+free_req:
+	spin_lock_bh(&ctx->lock);
+	list_add_tail(&req->list, &ctx->free_req_list);
+	spin_unlock_bh(&ctx->lock);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_hw_acquire_args       param;
+	struct cam_isp_resource         *isp_res = NULL;
+	struct cam_create_dev_hdl        req_hdl_param;
+	struct cam_hw_release_args       release;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_hw_cmd_args           hw_cmd_args;
+	struct cam_isp_hw_cmd_args       isp_hw_cmd_args;
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_ISP, "HW interface is not ready");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP,
+		"session_hdl 0x%x, num_resources %d, hdl type %d, res %lld",
+		cmd->session_handle, cmd->num_resources,
+		cmd->handle_type, cmd->resource_hdl);
+
+	if (cmd->num_resources == CAM_API_COMPAT_CONSTANT) {
+		ctx_isp->split_acquire = true;
+		CAM_DBG(CAM_ISP, "Acquire dev handle");
+		goto get_dev_handle;
+	}
+
+	if (cmd->num_resources > CAM_ISP_CTX_RES_MAX) {
+		CAM_ERR(CAM_ISP, "Too much resources in the acquire");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		CAM_ERR(CAM_ISP, "Only user pointer is supported");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	isp_res = kzalloc(
+		sizeof(*isp_res)*cmd->num_resources, GFP_KERNEL);
+	if (!isp_res) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "start copy %d resources from user",
+		 cmd->num_resources);
+
+	if (copy_from_user(isp_res, u64_to_user_ptr(cmd->resource_hdl),
+		sizeof(*isp_res)*cmd->num_resources)) {
+		rc = -EFAULT;
+		goto free_res;
+	}
+
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = cmd->num_resources;
+	param.acquire_info = (uintptr_t) isp_res;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Acquire device failed");
+		goto free_res;
+	}
+
+	/* Query whether the context has rdi only resources */
+	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+				&hw_cmd_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "HW command failed");
+		goto free_hw;
+	}
+
+	if (isp_hw_cmd_args.u.is_rdi_only_context) {
+		/*
+		 * This context has rdi only resources, so assign the
+		 * rdi only state machine
+		 */
+		CAM_DBG(CAM_ISP, "RDI only session Context");
+
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_rdi_only_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_rdi_only_activated_state_machine;
+	} else {
+		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_activated_state_machine;
+	}
+
+	ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context;
+	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+	ctx_isp->hw_acquired = true;
+	ctx_isp->split_acquire = false;
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	kfree(isp_res);
+	isp_res = NULL;
+
+get_dev_handle:
+
+	req_hdl_param.session_hdl = cmd->session_handle;
+	/* Bridge is not ready for these flags, so keep them false for now */
+	req_hdl_param.v4l2_sub_dev_flag = 0;
+	req_hdl_param.media_entity_flag = 0;
+	req_hdl_param.ops = ctx->crm_ctx_intf;
+	req_hdl_param.priv = ctx;
+
+	CAM_DBG(CAM_ISP, "get device handle form bridge");
+	ctx->dev_hdl = cam_create_device_hdl(&req_hdl_param);
+	if (ctx->dev_hdl <= 0) {
+		rc = -EFAULT;
+		CAM_ERR(CAM_ISP, "Can not create device handle");
+		goto free_hw;
+	}
+	cmd->dev_handle = ctx->dev_hdl;
+
+	/* store session information */
+	ctx->session_hdl = cmd->session_handle;
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	trace_cam_context_state("ISP", ctx);
+	CAM_DBG(CAM_ISP,
+		"Acquire success on session_hdl 0x%x num_rsrces %d ctx %u",
+		cmd->session_handle, cmd->num_resources, ctx->ctx_id);
+
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	if (ctx_isp->hw_acquired)
+		ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv,
+			&release);
+	ctx_isp->hw_ctx = NULL;
+	ctx_isp->hw_acquired = false;
+free_res:
+	kfree(isp_res);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_acquire_hw_v1(struct cam_context *ctx,
+	void *args)
+{
+	int rc = 0;
+	struct cam_acquire_hw_cmd_v1 *cmd =
+		(struct cam_acquire_hw_cmd_v1 *)args;
+	struct cam_hw_acquire_args       param;
+	struct cam_hw_release_args       release;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_hw_cmd_args           hw_cmd_args;
+	struct cam_isp_hw_cmd_args       isp_hw_cmd_args;
+	struct cam_isp_acquire_hw_info  *acquire_hw_info = NULL;
+
+	if (!ctx->hw_mgr_intf) {
+		CAM_ERR(CAM_ISP, "HW interface is not ready");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP,
+		"session_hdl 0x%x, hdl type %d, res %lld",
+		cmd->session_handle, cmd->handle_type, cmd->resource_hdl);
+
+	/* for now we only support user pointer */
+	if (cmd->handle_type != 1)  {
+		CAM_ERR(CAM_ISP, "Only user pointer is supported");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cmd->data_size < sizeof(*acquire_hw_info)) {
+		CAM_ERR(CAM_ISP, "data_size is not a valid value");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	acquire_hw_info = kzalloc(cmd->data_size, GFP_KERNEL);
+	if (!acquire_hw_info) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "start copy resources from user");
+
+	if (copy_from_user(acquire_hw_info, (void __user *)cmd->resource_hdl,
+		cmd->data_size)) {
+		rc = -EFAULT;
+		goto free_res;
+	}
+
+	param.context_data = ctx;
+	param.event_cb = ctx->irq_cb_intf;
+	param.num_acq = CAM_API_COMPAT_CONSTANT;
+	param.acquire_info_size = cmd->data_size;
+	param.acquire_info = (uintptr_t) acquire_hw_info;
+
+	/* call HW manager to reserve the resource */
+	rc = ctx->hw_mgr_intf->hw_acquire(ctx->hw_mgr_intf->hw_mgr_priv,
+		&param);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Acquire device failed");
+		goto free_res;
+	}
+
+	/* Query whether the context has rdi only resources */
+	hw_cmd_args.ctxt_to_hw_map = param.ctxt_to_hw_map;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+				&hw_cmd_args);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "HW command failed");
+		goto free_hw;
+	}
+
+	if (isp_hw_cmd_args.u.is_rdi_only_context) {
+		/*
+		 * This context has rdi only resources, so assign the
+		 * rdi only state machine
+		 */
+		CAM_DBG(CAM_ISP, "RDI only session Context");
+
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_rdi_only_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_rdi_only_activated_state_machine;
+	} else {
+		CAM_DBG(CAM_ISP, "Session has PIX or PIX and RDI resources");
+		ctx_isp->substate_machine_irq =
+			cam_isp_ctx_activated_state_machine_irq;
+		ctx_isp->substate_machine =
+			cam_isp_ctx_activated_state_machine;
+	}
+
+	ctx_isp->rdi_only_context = isp_hw_cmd_args.u.is_rdi_only_context;
+	ctx_isp->hw_ctx = param.ctxt_to_hw_map;
+	ctx_isp->hw_acquired = true;
+	ctx->ctxt_to_hw_map = param.ctxt_to_hw_map;
+
+	trace_cam_context_state("ISP", ctx);
+	CAM_DBG(CAM_ISP,
+		"Acquire success on session_hdl 0x%xs RDI only %d ctx %u",
+		ctx->session_hdl,
+		(isp_hw_cmd_args.u.is_rdi_only_context ? 1 : 0), ctx->ctx_id);
+	kfree(acquire_hw_info);
+	return rc;
+
+free_hw:
+	release.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	ctx->hw_mgr_intf->hw_release(ctx->hw_mgr_intf->hw_mgr_priv, &release);
+	ctx_isp->hw_ctx = NULL;
+	ctx_isp->hw_acquired = false;
+free_res:
+	kfree(acquire_hw_info);
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_acquire_hw_in_acquired(struct cam_context *ctx,
+	void *args)
+{
+	int rc = -EINVAL;
+	uint32_t api_version;
+
+	if (!ctx || !args) {
+		CAM_ERR(CAM_ISP, "Invalid input pointer");
+		return rc;
+	}
+
+	api_version = *((uint32_t *)args);
+	if (api_version == 1)
+		rc = __cam_isp_ctx_acquire_hw_v1(ctx, args);
+	else
+		CAM_ERR(CAM_ISP, "Unsupported api version %d", api_version);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (!ctx_isp->hw_acquired) {
+		CAM_ERR(CAM_ISP, "HW is not acquired, reject packet");
+		return -EINVAL;
+	}
+
+	rc = __cam_isp_ctx_config_dev_in_top_state(ctx, cmd);
+
+	if (!rc && (ctx->link_hdl >= 0)) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ISP", ctx);
+	}
+
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
+	return rc;
+}
+
+static int __cam_isp_ctx_link_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	CAM_DBG(CAM_ISP, "Enter.........");
+
+	ctx->link_hdl = link->link_hdl;
+	ctx->ctx_crm_intf = link->crm_cb;
+	ctx_isp->subscribe_event = link->subscribe_event;
+
+	/* change state only if we had the init config */
+	if (ctx_isp->init_received) {
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ISP", ctx);
+	}
+
+	CAM_DBG(CAM_ISP, "next state %d", ctx->state);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = -1;
+	ctx->ctx_crm_intf = NULL;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_get_dev_info_in_acquired(struct cam_context *ctx,
+	struct cam_req_mgr_device_info *dev_info)
+{
+	int rc = 0;
+
+	dev_info->dev_hdl = ctx->dev_hdl;
+	strlcpy(dev_info->name, CAM_ISP_DEV_NAME, sizeof(dev_info->name));
+	dev_info->dev_id = CAM_REQ_MGR_DEVICE_IFE;
+	dev_info->p_delay = 1;
+	dev_info->trigger = CAM_TRIGGER_POINT_SOF;
+
+	return rc;
+}
+
+static int __cam_isp_ctx_start_dev_in_ready(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_start_args        start_isp;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	if (cmd->session_handle != ctx->session_hdl ||
+		cmd->dev_handle != ctx->dev_hdl) {
+		rc = -EPERM;
+		goto end;
+	}
+
+	if (list_empty(&ctx->pending_req_list)) {
+		/* should never happen */
+		CAM_ERR(CAM_ISP, "Start device with empty configuration");
+		rc = -EFAULT;
+		goto end;
+	} else {
+		req = list_first_entry(&ctx->pending_req_list,
+			struct cam_ctx_request, list);
+	}
+	req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+
+	if (!ctx_isp->hw_ctx) {
+		CAM_ERR(CAM_ISP, "Wrong hw context pointer.");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	start_isp.hw_config.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	start_isp.hw_config.request_id = req->request_id;
+	start_isp.hw_config.hw_update_entries = req_isp->cfg;
+	start_isp.hw_config.num_hw_update_entries = req_isp->num_cfg;
+	start_isp.hw_config.priv  = &req_isp->hw_update_data;
+	start_isp.hw_config.init_packet = 1;
+	start_isp.start_only = false;
+
+	ctx_isp->frame_id = 0;
+	ctx_isp->active_req_cnt = 0;
+	ctx_isp->reported_req_id = 0;
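+	/*
+	 * Pick the initial substate: rdi only sessions start in APPLIED;
+	 * otherwise start in EPOCH when the init request has output buffers,
+	 * or in SOF when it has none.
+	 */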
+	ctx_isp->substate_activated = ctx_isp->rdi_only_context ?
+		CAM_ISP_CTX_ACTIVATED_APPLIED :
+		(req_isp->num_fence_map_out) ? CAM_ISP_CTX_ACTIVATED_EPOCH :
+		CAM_ISP_CTX_ACTIVATED_SOF;
+
+	/*
+	 * This is the only place we change state before calling the hw:
+	 * the hardware tasklet has higher priority, which can cause the
+	 * irq handling to come in early
+	 */
+	ctx->state = CAM_CTX_ACTIVATED;
+	trace_cam_context_state("ISP", ctx);
+	rc = ctx->hw_mgr_intf->hw_start(ctx->hw_mgr_intf->hw_mgr_priv,
+		&start_isp);
+	if (rc) {
+		/* HW failure. The user needs to clean up the resources */
+		CAM_ERR(CAM_ISP, "Start HW failed");
+		ctx->state = CAM_CTX_READY;
+		trace_cam_context_state("ISP", ctx);
+		goto end;
+	}
+	CAM_DBG(CAM_ISP, "start device success ctx %u", ctx->ctx_id);
+
+	list_del_init(&req->list);
+
+	if (req_isp->num_fence_map_out) {
+		list_add_tail(&req->list, &ctx->active_req_list);
+		ctx_isp->active_req_cnt++;
+	} else {
+		list_add_tail(&req->list, &ctx->wait_req_list);
+	}
+end:
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_ready(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	ctx->link_hdl = -1;
+	ctx->ctx_crm_intf = NULL;
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("ISP", ctx);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated_unlock(
+	struct cam_context *ctx, struct cam_start_stop_dev_cmd *stop_cmd)
+{
+	int rc = 0;
+	uint32_t i;
+	struct cam_hw_stop_args          stop;
+	struct cam_ctx_request          *req;
+	struct cam_isp_ctx_req          *req_isp;
+	struct cam_isp_context          *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+	struct cam_isp_stop_args         stop_isp;
+
+	/* Mask off all the incoming hardware events */
+	spin_lock_bh(&ctx->lock);
+	ctx_isp->substate_activated = CAM_ISP_CTX_ACTIVATED_HALT;
+	spin_unlock_bh(&ctx->lock);
+	CAM_DBG(CAM_ISP, "next substate %d", ctx_isp->substate_activated);
+
+	/* stop hw first */
+	if (ctx_isp->hw_ctx) {
+		stop.ctxt_to_hw_map = ctx_isp->hw_ctx;
+
+		if (stop_cmd)
+			stop_isp.hw_stop_cmd =
+				CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY;
+		else
+			stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+
+		stop_isp.stop_only = false;
+		stop.args = (void *) &stop_isp;
+		ctx->hw_mgr_intf->hw_stop(ctx->hw_mgr_intf->hw_mgr_priv,
+			&stop);
+	}
+
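+	/*
+	 * Drain the pending, wait and active lists, signalling all of their
+	 * fences with an error status
+	 */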
+	while (!list_empty(&ctx->pending_req_list)) {
+		req = list_first_entry(&ctx->pending_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in pending list. fence num %d",
+			 req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	while (!list_empty(&ctx->wait_req_list)) {
+		req = list_first_entry(&ctx->wait_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in wait list. fence num %d",
+			 req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+
+	while (!list_empty(&ctx->active_req_list)) {
+		req = list_first_entry(&ctx->active_req_list,
+				struct cam_ctx_request, list);
+		list_del_init(&req->list);
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		CAM_DBG(CAM_ISP, "signal fence in active list. fence num %d",
+			 req_isp->num_fence_map_out);
+		for (i = 0; i < req_isp->num_fence_map_out; i++)
+			if (req_isp->fence_map_out[i].sync_id != -1) {
+				cam_sync_signal(
+					req_isp->fence_map_out[i].sync_id,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+			}
+		list_add_tail(&req->list, &ctx->free_req_list);
+	}
+	ctx_isp->frame_id = 0;
+	ctx_isp->active_req_cnt = 0;
+	ctx_isp->reported_req_id = 0;
+
+	CAM_DBG(CAM_ISP, "Stop device success next state %d on ctx %u",
+		ctx->state, ctx->ctx_id);
+	return rc;
+}
+
+static int __cam_isp_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	__cam_isp_ctx_stop_dev_in_activated_unlock(ctx, cmd);
+	ctx_isp->init_received = false;
+	ctx->state = CAM_CTX_ACQUIRED;
+	trace_cam_context_state("ISP", ctx);
+	return rc;
+}
+
+static int __cam_isp_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);
+
+	rc = __cam_isp_ctx_release_dev_in_top_state(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Release device failed rc=%d", rc);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_release_hw_in_activated(struct cam_context *ctx,
+	void *cmd)
+{
+	int rc = 0;
+
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Stop device failed rc=%d", rc);
+
+	rc = __cam_isp_ctx_release_hw_in_top_state(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Release hw failed rc=%d", rc);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_link_pause(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args       hw_cmd_args;
+	struct cam_isp_hw_cmd_args   isp_hw_cmd_args;
+	struct cam_isp_context      *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_PAUSE_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_link_resume(struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args       hw_cmd_args;
+	struct cam_isp_hw_cmd_args   isp_hw_cmd_args;
+	struct cam_isp_context      *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_RESUME_HW;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_handle_sof_freeze_evt(
+	struct cam_context *ctx)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args       hw_cmd_args;
+	struct cam_isp_hw_cmd_args   isp_hw_cmd_args;
+	struct cam_isp_context      *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	hw_cmd_args.ctxt_to_hw_map = ctx_isp->hw_ctx;
+	hw_cmd_args.cmd_type = CAM_HW_MGR_CMD_INTERNAL;
+	isp_hw_cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_SOF_DEBUG;
+	isp_hw_cmd_args.u.sof_irq_enable = 1;
+	hw_cmd_args.u.internal_args = (void *)&isp_hw_cmd_args;
+
+	rc = ctx->hw_mgr_intf->hw_cmd(ctx->hw_mgr_intf->hw_mgr_priv,
+		&hw_cmd_args);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_process_evt(struct cam_context *ctx,
+	struct cam_req_mgr_link_evt_data *link_evt_data)
+{
+	int rc = 0;
+
+	switch (link_evt_data->evt_type) {
+	case CAM_REQ_MGR_LINK_EVT_ERR:
+		/* No need to handle this message now */
+		break;
+	case CAM_REQ_MGR_LINK_EVT_PAUSE:
+		__cam_isp_ctx_link_pause(ctx);
+		break;
+	case CAM_REQ_MGR_LINK_EVT_RESUME:
+		__cam_isp_ctx_link_resume(ctx);
+		break;
+	case CAM_REQ_MGR_LINK_EVT_SOF_FREEZE:
+		__cam_isp_ctx_handle_sof_freeze_evt(ctx);
+		break;
+	default:
+		CAM_WARN(CAM_ISP, "Unknown event from CRM");
+		break;
+	}
+	return rc;
+}
+
+static int __cam_isp_ctx_unlink_in_activated(struct cam_context *ctx,
+	struct cam_req_mgr_core_dev_link_setup *unlink)
+{
+	int rc = 0;
+
+	CAM_WARN(CAM_ISP,
+		"Received unlink in activated state, which is unexpected");
+
+	rc = __cam_isp_ctx_stop_dev_in_activated_unlock(ctx, NULL);
+	if (rc)
+		CAM_WARN(CAM_ISP, "Stop device failed rc=%d", rc);
+
+	rc = __cam_isp_ctx_unlink_in_ready(ctx, unlink);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Unlink failed rc=%d", rc);
+
+	return rc;
+}
+
+static int __cam_isp_ctx_apply_req(struct cam_context *ctx,
+	struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_ctx_ops *ctx_ops = NULL;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *) ctx->ctx_priv;
+
+	trace_cam_apply_req("ISP", apply->request_id);
+	CAM_DBG(CAM_ISP, "Enter: apply req in Substate %d request _id:%lld",
+		 ctx_isp->substate_activated, apply->request_id);
+	ctx_ops = &ctx_isp->substate_machine[ctx_isp->substate_activated];
+	if (ctx_ops->crm_ops.apply_req) {
+		rc = ctx_ops->crm_ops.apply_req(ctx, apply);
+	} else {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No handle function in activated substate %d",
+			ctx_isp->substate_activated);
+		rc = -EFAULT;
+	}
+
+	if (rc)
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Apply failed in active substate %d",
+			ctx_isp->substate_activated);
+	return rc;
+}
+
+static int __cam_isp_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc = 0;
+	struct cam_isp_ctx_irq_ops *irq_ops = NULL;
+	struct cam_context *ctx = (struct cam_context *)context;
+	struct cam_isp_context *ctx_isp =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	spin_lock(&ctx->lock);
+
+	trace_cam_isp_activated_irq(ctx, ctx_isp->substate_activated, evt_id,
+		__cam_isp_ctx_get_event_ts(evt_id, evt_data));
+
+	CAM_DBG(CAM_ISP, "Enter: State %d, Substate %d, evt id %d",
+		 ctx->state, ctx_isp->substate_activated, evt_id);
+	irq_ops = &ctx_isp->substate_machine_irq[ctx_isp->substate_activated];
+	if (irq_ops->irq_ops[evt_id]) {
+		rc = irq_ops->irq_ops[evt_id](ctx_isp, evt_data);
+	} else {
+		CAM_DBG(CAM_ISP, "No handle function for substate %d",
+			ctx_isp->substate_activated);
+		__cam_isp_ctx_dump_state_monitor_array(ctx_isp);
+	}
+
+	CAM_DBG(CAM_ISP, "Exit: State %d Substate %d",
+		 ctx->state, ctx_isp->substate_activated);
+	spin_unlock(&ctx->lock);
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_isp_ctx_top_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_isp_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.acquire_hw = __cam_isp_ctx_acquire_hw_in_acquired,
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_acquired,
+			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
+		},
+		.crm_ops = {
+			.link = __cam_isp_ctx_link_in_acquired,
+			.unlink = __cam_isp_ctx_unlink_in_acquired,
+			.get_dev_info = __cam_isp_ctx_get_dev_info_in_acquired,
+			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
+		},
+		.irq_ops = NULL,
+		.pagefault_ops = cam_isp_context_dump_active_request,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {
+			.start_dev = __cam_isp_ctx_start_dev_in_ready,
+			.release_dev = __cam_isp_ctx_release_dev_in_top_state,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+			.release_hw = __cam_isp_ctx_release_hw_in_top_state,
+		},
+		.crm_ops = {
+			.unlink = __cam_isp_ctx_unlink_in_ready,
+			.flush_req = __cam_isp_ctx_flush_req_in_ready,
+		},
+		.irq_ops = NULL,
+		.pagefault_ops = cam_isp_context_dump_active_request,
+	},
+	/* Activated */
+	{
+		.ioctl_ops = {
+			.stop_dev = __cam_isp_ctx_stop_dev_in_activated,
+			.release_dev = __cam_isp_ctx_release_dev_in_activated,
+			.config_dev = __cam_isp_ctx_config_dev_in_top_state,
+			.release_hw = __cam_isp_ctx_release_hw_in_activated,
+		},
+		.crm_ops = {
+			.unlink = __cam_isp_ctx_unlink_in_activated,
+			.apply_req = __cam_isp_ctx_apply_req,
+			.flush_req = __cam_isp_ctx_flush_req_in_top_state,
+			.process_evt = __cam_isp_ctx_process_evt,
+		},
+		.irq_ops = __cam_isp_ctx_handle_irq_in_activated,
+		.pagefault_ops = cam_isp_context_dump_active_request,
+	},
+};
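+
+/*
+ * Dispatch sketch (illustrative only; the actual lookup lives in the common
+ * cam_context layer, outside this file): an ioctl arriving in a given top
+ * state resolves through this table, e.g.
+ *
+ *	ctx_ops = &ctx->state_machine[ctx->state];
+ *	if (ctx_ops->ioctl_ops.start_dev)
+ *		rc = ctx_ops->ioctl_ops.start_dev(ctx, cmd);
+ */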
+
+static int cam_isp_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request *req = NULL;
+	struct cam_ctx_request *req_temp = NULL;
+	struct cam_isp_ctx_req *req_isp  = NULL;
+	struct cam_isp_prepare_hw_update_data *hw_update_data = NULL;
+	struct cam_hw_mgr_dump_pf_data *pf_dbg_entry = NULL;
+	bool mem_found = false;
+	int rc = 0;
+
+	struct cam_isp_context *isp_ctx =
+		(struct cam_isp_context *)ctx->ctx_priv;
+
+	if (!isp_ctx) {
+		CAM_ERR(CAM_ISP, "Invalid isp ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_ISP, "iommu fault handler for isp ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+		&ctx->active_req_list, list) {
+		req_isp = (struct cam_isp_ctx_req *) req->req_priv;
+		hw_update_data = &req_isp->hw_update_data;
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_ISP, "req_id : %lld ", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &mem_found);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Failed to dump pf info");
+
+		if (mem_found)
+			CAM_ERR(CAM_ISP, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+
+	return rc;
+}
+
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *crm_node_intf,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+{
+	int rc = -1;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		CAM_ERR(CAM_ISP, "Invalid Context");
+		goto err;
+	}
+
+	/* ISP context setup */
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+	ctx->frame_id = 0;
+	ctx->active_req_cnt = 0;
+	ctx->reported_req_id = 0;
+	ctx->hw_ctx = NULL;
+	ctx->substate_activated = CAM_ISP_CTX_ACTIVATED_SOF;
+	ctx->substate_machine = cam_isp_ctx_activated_state_machine;
+	ctx->substate_machine_irq = cam_isp_ctx_activated_state_machine_irq;
+
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++) {
+		ctx->req_base[i].req_priv = &ctx->req_isp[i];
+		ctx->req_isp[i].base = &ctx->req_base[i];
+	}
+
+	/* camera context setup */
+	rc = cam_context_init(ctx_base, isp_dev_name, CAM_ISP, ctx_id,
+		crm_node_intf, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Camera Context Base init failed");
+		goto err;
+	}
+
+	/* link camera context with isp context */
+	ctx_base->state_machine = cam_isp_ctx_top_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+	/* initializing current state for error logging */
+	for (i = 0; i < CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES; i++) {
+		ctx->cam_isp_ctx_state_monitor[i].curr_state =
+			CAM_ISP_CTX_ACTIVATED_MAX;
+	}
+	atomic64_set(&ctx->state_monitor_head, -1);
+err:
+	return rc;
+}
+
+int cam_isp_context_deinit(struct cam_isp_context *ctx)
+{
+	int rc = 0;
+
+	if (ctx->base)
+		cam_context_deinit(ctx->base);
+
+	if (ctx->substate_activated != CAM_ISP_CTX_ACTIVATED_SOF)
+		CAM_ERR(CAM_ISP, "ISP context substate is invalid");
+
+	memset(ctx, 0, sizeof(*ctx));
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
new file mode 100644
index 0000000..957b8a1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ISP_CONTEXT_H_
+#define _CAM_ISP_CONTEXT_H_
+
+
+#include <linux/spinlock.h>
+#include <uapi/media/cam_isp.h>
+#include <uapi/media/cam_defs.h>
+
+#include "cam_context.h"
+#include "cam_isp_hw_mgr_intf.h"
+
+/*
+ * Maximum hw resource - This number is based on the maximum
+ * output port resource. The current maximum resource number
+ * is 24.
+ */
+#define CAM_ISP_CTX_RES_MAX                     24
+
+/*
+ * Maximum configuration entry size - This is based on the
+ * worst case DUAL IFE use case plus some margin.
+ */
+#define CAM_ISP_CTX_CFG_MAX                     22
+
+/*
+ * Maximum entries in state monitoring array for error logging
+ */
+#define CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES   20
+
+/* forward declaration */
+struct cam_isp_context;
+
+/* cam isp context irq handling function type */
+typedef int (*cam_isp_hw_event_cb_func)(struct cam_isp_context *ctx_isp,
+	void *evt_data);
+
+/**
+ * enum cam_isp_ctx_activated_substate - sub states for the activated state
+ *
+ */
+enum cam_isp_ctx_activated_substate {
+	CAM_ISP_CTX_ACTIVATED_SOF,
+	CAM_ISP_CTX_ACTIVATED_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_EPOCH,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE,
+	CAM_ISP_CTX_ACTIVATED_BUBBLE_APPLIED,
+	CAM_ISP_CTX_ACTIVATED_HW_ERROR,
+	CAM_ISP_CTX_ACTIVATED_HALT,
+	CAM_ISP_CTX_ACTIVATED_MAX,
+};
+
+/**
+ * enum cam_isp_state_change_trigger - Different types of ISP events
+ *
+ */
+enum cam_isp_state_change_trigger {
+	CAM_ISP_STATE_CHANGE_TRIGGER_ERROR,
+	CAM_ISP_STATE_CHANGE_TRIGGER_SOF,
+	CAM_ISP_STATE_CHANGE_TRIGGER_REG_UPDATE,
+	CAM_ISP_STATE_CHANGE_TRIGGER_EPOCH,
+	CAM_ISP_STATE_CHANGE_TRIGGER_EOF,
+	CAM_ISP_STATE_CHANGE_TRIGGER_DONE,
+	CAM_ISP_STATE_CHANGE_TRIGGER_MAX
+};
+
+/**
+ * struct cam_isp_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops:               Array of handle function pointers.
+ *
+ */
+struct cam_isp_ctx_irq_ops {
+	cam_isp_hw_event_cb_func         irq_ops[CAM_ISP_HW_EVENT_MAX];
+};
+
+/**
+ * struct cam_isp_ctx_req - ISP context request object
+ *
+ * @base:                  Common request object pointer
+ * @cfg:                   ISP hardware configuration array
+ * @num_cfg:               Number of ISP hardware configuration entries
+ * @fence_map_out:         Output fence mapping array
+ * @num_fence_map_out:     Number of output fence map entries
+ * @fence_map_in:          Input fence mapping array
+ * @num_fence_map_in:      Number of input fence map entries
+ * @num_acked:             Count to track acked entries for output.
+ *                         If count equals the number of fence out, it means
+ *                         the request has been completed.
+ * @bubble_report:         Flag to track if bubble report is active on
+ *                         current request
+ * @hw_update_data:        HW update data for this request
+ * @bubble_detected:       Flag to track if the current request has a bubble
+ *
+ */
+struct cam_isp_ctx_req {
+	struct cam_ctx_request               *base;
+
+	struct cam_hw_update_entry            cfg[CAM_ISP_CTX_CFG_MAX];
+	uint32_t                              num_cfg;
+	struct cam_hw_fence_map_entry         fence_map_out
+						[CAM_ISP_CTX_RES_MAX];
+	uint32_t                              num_fence_map_out;
+	struct cam_hw_fence_map_entry         fence_map_in[CAM_ISP_CTX_RES_MAX];
+	uint32_t                              num_fence_map_in;
+	uint32_t                              num_acked;
+	int32_t                               bubble_report;
+	struct cam_isp_prepare_hw_update_data hw_update_data;
+	bool                                  bubble_detected;
+};
+
+/**
+ * struct cam_isp_context_state_monitor - ISP context state
+ *                                        monitoring for
+ *                                        debug purposes
+ *
+ * @curr_state:          Current sub state when the event was recorded
+ * @trigger:             Trigger (event type) that caused the state change
+ * @req_id:              Request id
+ * @frame_id:            Frame id at the time of the event
+ * @evt_time_stamp:      Time stamp of the event
+ *
+ */
+struct cam_isp_context_state_monitor {
+	enum cam_isp_ctx_activated_substate  curr_state;
+	enum cam_isp_state_change_trigger    trigger;
+	uint32_t                             req_id;
+	int64_t                              frame_id;
+	uint64_t                             evt_time_stamp;
+};
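+
+/*
+ * Write-side sketch (illustrative; the exact index math is an assumption,
+ * not taken from this patch): each state change claims the next slot of the
+ * ring buffer via the atomic head in struct cam_isp_context, e.g.
+ *
+ *	idx = atomic64_add_return(1, &ctx_isp->state_monitor_head) %
+ *		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES;
+ *	ctx_isp->cam_isp_ctx_state_monitor[idx].curr_state = next_state;
+ */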
+
+/**
+ * struct cam_isp_context   -  ISP context object
+ *
+ * @base:                      Common context object pointer
+ * @frame_id:                  Frame id tracking for the isp context
+ * @substate_activated:        Current substate for the activated state.
+ * @substate_machine:          ISP substate machine for external interface
+ * @substate_machine_irq:      ISP substate machine for irq handling
+ * @req_base:                  Common request object storage
+ * @req_isp:                   ISP private request object storage
+ * @hw_ctx:                    HW object returned by the acquire device command
+ * @sof_timestamp_val:         Captured time stamp value at sof hw event
+ * @boot_timestamp:            Boot time stamp for a given req_id
+ * @active_req_cnt:            Counter for the active request
+ * @reported_req_id:           Last reported request id
+ * @subscribe_event:           The irq event mask that CRM subscribes to; IFE
+ *                             will invoke the CRM callback on those events.
+ * @last_applied_req_id:       Last applied request id
+ * @state_monitor_head:        Write index to the state monitoring array
+ * @cam_isp_ctx_state_monitor: State monitoring array
+ * @rdi_only_context:          True if this is an rdi only context
+ * @hw_acquired:               Indicate whether HW resources are acquired
+ * @init_received:             Indicate whether init config packet is received
+ * @split_acquire:             Indicate whether a separate acquire is expected
+ *
+ */
+struct cam_isp_context {
+	struct cam_context              *base;
+
+	int64_t                          frame_id;
+	uint32_t                         substate_activated;
+	struct cam_ctx_ops              *substate_machine;
+	struct cam_isp_ctx_irq_ops      *substate_machine_irq;
+
+	struct cam_ctx_request           req_base[CAM_CTX_REQ_MAX];
+	struct cam_isp_ctx_req           req_isp[CAM_CTX_REQ_MAX];
+
+	void                            *hw_ctx;
+	uint64_t                         sof_timestamp_val;
+	uint64_t                         boot_timestamp;
+	int32_t                          active_req_cnt;
+	int64_t                          reported_req_id;
+	uint32_t                         subscribe_event;
+	int64_t                          last_applied_req_id;
+	atomic64_t                       state_monitor_head;
+	struct cam_isp_context_state_monitor cam_isp_ctx_state_monitor[
+		CAM_ISP_CTX_STATE_MONITOR_MAX_ENTRIES];
+	bool                             rdi_only_context;
+	bool                             hw_acquired;
+	bool                             init_received;
+	bool                             split_acquire;
+};
+
+/**
+ * cam_isp_context_init()
+ *
+ * @brief:              Initialization function for the ISP context
+ *
+ * @ctx:                ISP context obj to be initialized
+ * @ctx_base:           Base camera context linked with this ISP context
+ * @bridge_ops:         Bridge callback functions
+ * @hw_intf:            ISP hw manager interface
+ * @ctx_id:             ID for this context
+ *
+ */
+int cam_isp_context_init(struct cam_isp_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_req_mgr_kmd_ops *bridge_ops,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id);
+
+/**
+ * cam_isp_context_deinit()
+ *
+ * @brief:               Deinitialize function for the ISP context
+ *
+ * @ctx:                 ISP context obj to be deinitialized
+ *
+ */
+int cam_isp_context_deinit(struct cam_isp_context *ctx);
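+
+/*
+ * Pairing sketch (illustrative; mirrors the probe/remove path in
+ * cam_isp_dev.c): each ISP context wraps a base camera context and is torn
+ * down in reverse order, e.g.
+ *
+ *	rc = cam_isp_context_init(&isp_ctx, &base_ctx, crm_intf, hw_intf, 0);
+ *	...
+ *	cam_isp_context_deinit(&isp_ctx);
+ */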
+
+#endif  /* _CAM_ISP_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
new file mode 100644
index 0000000..d6aa018
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+
+#include <uapi/media/cam_req_mgr.h>
+#include "cam_isp_dev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_node.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+static struct cam_isp_dev g_isp_dev;
+
+static void cam_isp_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_ISP, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
+static const struct of_device_id cam_isp_dt_match[] = {
+	{
+		.compatible = "qcom,cam-isp"
+	},
+	{}
+};
+
+static int cam_isp_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	mutex_lock(&g_isp_dev.isp_mutex);
+	g_isp_dev.open_cnt++;
+	mutex_unlock(&g_isp_dev.isp_mutex);
+
+	return 0;
+}
+
+static int cam_isp_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_isp_dev.isp_mutex);
+	if (g_isp_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_ISP, "ISP subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	g_isp_dev.open_cnt--;
+	if (!node) {
+		CAM_ERR(CAM_ISP, "Node ptr is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (g_isp_dev.open_cnt == 0)
+		cam_node_shutdown(node);
+
+end:
+	mutex_unlock(&g_isp_dev.isp_mutex);
+	return rc;
+}
+
+static const struct v4l2_subdev_internal_ops cam_isp_subdev_internal_ops = {
+	.close = cam_isp_subdev_close,
+	.open = cam_isp_subdev_open,
+};
+
+static int cam_isp_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+
+	/* clean up resources */
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_deinit(&g_isp_dev.ctx_isp[i]);
+		if (rc)
+			CAM_ERR(CAM_ISP, "ISP context %d deinit failed",
+				 i);
+	}
+
+	rc = cam_subdev_remove(&g_isp_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Unregister failed");
+
+	memset(&g_isp_dev, 0, sizeof(g_isp_dev));
+	return 0;
+}
+
+static int cam_isp_dev_probe(struct platform_device *pdev)
+{
+	int rc = -1;
+	int i;
+	struct cam_hw_mgr_intf         hw_mgr_intf;
+	struct cam_node               *node;
+	int iommu_hdl = -1;
+
+	g_isp_dev.sd.internal_ops = &cam_isp_subdev_internal_ops;
+	/* Initialize the v4l2 subdevice first. (create cam_node) */
+	rc = cam_subdev_probe(&g_isp_dev.sd, pdev, CAM_ISP_DEV_NAME,
+		CAM_IFE_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "ISP cam_subdev_probe failed!");
+		goto err;
+	}
+	node = (struct cam_node *) g_isp_dev.sd.token;
+
+	memset(&hw_mgr_intf, 0, sizeof(hw_mgr_intf));
+	rc = cam_isp_hw_mgr_init(pdev->dev.of_node, &hw_mgr_intf, &iommu_hdl);
+	if (rc != 0) {
+		CAM_ERR(CAM_ISP, "Can not initialized ISP HW manager!");
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_isp_context_init(&g_isp_dev.ctx_isp[i],
+			&g_isp_dev.ctx[i],
+			&node->crm_node_intf,
+			&node->hw_mgr_intf,
+			i);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "ISP context init failed!");
+			goto unregister;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_isp_dev.ctx, CAM_CTX_MAX,
+		CAM_ISP_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "ISP node init failed!");
+		goto unregister;
+	}
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_isp_dev_iommu_fault_handler, node);
+
+	mutex_init(&g_isp_dev.isp_mutex);
+
+	CAM_INFO(CAM_ISP, "Camera ISP probe complete");
+
+	return 0;
+unregister:
+	rc = cam_subdev_remove(&g_isp_dev.sd);
+err:
+	return rc;
+}
+
+static struct platform_driver isp_driver = {
+	.probe = cam_isp_dev_probe,
+	.remove = cam_isp_dev_remove,
+	.driver = {
+		.name = "cam_isp",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_isp_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_isp_dev_init_module(void)
+{
+	return platform_driver_register(&isp_driver);
+}
+
+static void __exit cam_isp_dev_exit_module(void)
+{
+	platform_driver_unregister(&isp_driver);
+}
+
+module_init(cam_isp_dev_init_module);
+module_exit(cam_isp_dev_exit_module);
+MODULE_DESCRIPTION("MSM ISP driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
new file mode 100644
index 0000000..7be6894
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_dev.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ISP_DEV_H_
+#define _CAM_ISP_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_isp_context.h"
+
+/**
+ * struct cam_isp_dev - Camera ISP V4l2 device node
+ *
+ * @sd:                    Common camera subdevice node
+ * @ctx:                   ISP base context storage
+ * @ctx_isp:               ISP private context storage
+ * @isp_mutex:             ISP dev mutex
+ * @open_cnt:              Open device count
+ */
+struct cam_isp_dev {
+	struct cam_subdev          sd;
+	struct cam_context         ctx[CAM_CTX_MAX];
+	struct cam_isp_context     ctx_isp[CAM_CTX_MAX];
+	struct mutex               isp_mutex;
+	int32_t                    open_cnt;
+};
+
+#endif /* _CAM_ISP_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
new file mode 100644
index 0000000..ca4848b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += hw_utils/ isp_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_isp_hw_mgr.o cam_ife_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
new file mode 100644
index 0000000..d92299ec
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -0,0 +1,5025 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/scm.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_smmu_api.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_isp_packet_parser.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_packet_util.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_common_util.h"
+
+#define CAM_IFE_HW_ENTRIES_MAX  20
+
+#define TZ_SVC_SMMU_PROGRAM 0x15
+#define TZ_SAFE_SYSCALL_ID  0x3
+#define CAM_IFE_SAFE_DISABLE 0
+#define CAM_IFE_SAFE_ENABLE 1
+#define SMMU_SE_IFE 0
+
+#define CAM_ISP_PACKET_META_MAX                     \
+	(CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
+
+#define CAM_ISP_GENERIC_BLOB_TYPE_MAX               \
+	(CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG + 1)
+
+static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
+	CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+	CAM_ISP_HW_CMD_CLOCK_UPDATE,
+	CAM_ISP_HW_CMD_BW_UPDATE,
+	CAM_ISP_HW_CMD_UBWC_UPDATE,
+	CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
+};
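+
+/*
+ * Lookup sketch (illustrative; blob_type and hw_cmd_type are hypothetical
+ * locals): a generic blob type parsed from the packet indexes this table to
+ * select the internal HW manager command, e.g.
+ *
+ *	if (blob_type < CAM_ISP_GENERIC_BLOB_TYPE_MAX)
+ *		hw_cmd_type = blob_type_hw_cmd_map[blob_type];
+ */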
+
+static struct cam_ife_hw_mgr g_ife_hw_mgr;
+
+static int cam_ife_notify_safe_lut_scm(bool safe_trigger)
+{
+	int rc = 0;
+	uint32_t camera_hw_version;
+	struct scm_desc desc = {0};
+
+	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+	if (!rc) {
+		switch (camera_hw_version) {
+		case CAM_CPAS_TITAN_170_V100:
+		case CAM_CPAS_TITAN_170_V110:
+		case CAM_CPAS_TITAN_175_V100:
+
+			desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+			desc.args[0] = SMMU_SE_IFE;
+			desc.args[1] = safe_trigger;
+
+			CAM_DBG(CAM_ISP, "Safe scm call %d", safe_trigger);
+			if (scm_call2(SCM_SIP_FNID(TZ_SVC_SMMU_PROGRAM,
+					TZ_SAFE_SYSCALL_ID), &desc)) {
+				CAM_ERR(CAM_ISP,
+					"scm call to Enable Safe failed");
+				rc = -EINVAL;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	return rc;
+}
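+
+/*
+ * Caller sketch (illustrative; the call sites are outside this hunk): secure
+ * sessions are expected to toggle the SMMU safe LUT around streaming, e.g.
+ *
+ *	rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_ENABLE);
+ *	if (rc)
+ *		return rc;
+ */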
+
+static int cam_ife_mgr_get_hw_caps(void *hw_mgr_priv,
+	void *hw_caps_args)
+{
+	int rc = 0;
+	int i;
+	struct cam_ife_hw_mgr             *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd          *query = hw_caps_args;
+	struct cam_isp_query_cap_cmd       query_isp;
+
+	CAM_DBG(CAM_ISP, "enter");
+
+	if (copy_from_user(&query_isp,
+		u64_to_user_ptr(query->caps_handle),
+		sizeof(struct cam_isp_query_cap_cmd))) {
+		rc = -EFAULT;
+		return rc;
+	}
+
+	query_isp.device_iommu.non_secure = hw_mgr->mgr_common.img_iommu_hdl;
+	query_isp.device_iommu.secure = hw_mgr->mgr_common.img_iommu_hdl_secure;
+	query_isp.cdm_iommu.non_secure = hw_mgr->mgr_common.cmd_iommu_hdl;
+	query_isp.cdm_iommu.secure = hw_mgr->mgr_common.cmd_iommu_hdl_secure;
+	query_isp.num_dev = 2;
+	for (i = 0; i < query_isp.num_dev; i++) {
+		query_isp.dev_caps[i].hw_type = CAM_ISP_HW_IFE;
+		query_isp.dev_caps[i].hw_version.major = 1;
+		query_isp.dev_caps[i].hw_version.minor = 7;
+		query_isp.dev_caps[i].hw_version.incr = 0;
+		query_isp.dev_caps[i].hw_version.reserved = 0;
+	}
+
+	if (copy_to_user(u64_to_user_ptr(query->caps_handle),
+		&query_isp, sizeof(struct cam_isp_query_cap_cmd)))
+		rc = -EFAULT;
+
+	CAM_DBG(CAM_ISP, "exit rc :%d", rc);
+
+	return rc;
+}
+
+static int cam_ife_hw_mgr_is_rdi_res(uint32_t res_id)
+{
+	int rc = 0;
+
+	switch (res_id) {
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+	case CAM_ISP_IFE_OUT_RES_RDI_3:
+		rc = 1;
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_ife_hw_mgr_reset_csid_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	int rc = 0;
+	struct cam_hw_intf      *hw_intf;
+	struct cam_csid_reset_cfg_args  csid_reset_args;
+
+	csid_reset_args.reset_type = CAM_IFE_CSID_RESET_PATH;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		csid_reset_args.node_res = isp_hw_res->hw_res[i];
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		CAM_DBG(CAM_ISP, "Resetting csid hardware %d",
+			hw_intf->hw_idx);
+		if (hw_intf->hw_ops.reset) {
+			rc = hw_intf->hw_ops.reset(hw_intf->hw_priv,
+				&csid_reset_args,
+				sizeof(struct cam_csid_reset_cfg_args));
+			if (rc)
+				goto err;
+		}
+	}
+
+	return 0;
+err:
+	CAM_ERR(CAM_ISP, "RESET HW res failed: (type:%d, id:%d)",
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+static int cam_ife_hw_mgr_init_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	int rc = -1;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		CAM_DBG(CAM_ISP, "enabled vfe hardware %d",
+			hw_intf->hw_idx);
+		if (hw_intf->hw_ops.init) {
+			rc = hw_intf->hw_ops.init(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				goto err;
+		}
+	}
+
+	return 0;
+err:
+	CAM_ERR(CAM_ISP, "INIT HW res failed: (type:%d, id:%d)",
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+static int cam_ife_hw_mgr_start_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res,
+	struct cam_ife_hw_mgr_ctx   *ctx)
+{
+	int i;
+	int rc = -1;
+	struct cam_hw_intf      *hw_intf;
+
+	/* Start slave (which is right split) first */
+	for (i = CAM_ISP_HW_SPLIT_MAX - 1; i >= 0; i--) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.start) {
+			isp_hw_res->hw_res[i]->rdi_only_ctx =
+				ctx->is_rdi_only_context;
+			rc = hw_intf->hw_ops.start(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc) {
+				CAM_ERR(CAM_ISP, "Can not start HW resources");
+				goto err;
+			}
+			CAM_DBG(CAM_ISP, "Start HW %d Res %d", hw_intf->hw_idx,
+				isp_hw_res->hw_res[i]->res_id);
+		} else {
+			CAM_ERR(CAM_ISP, "function null");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	CAM_ERR(CAM_ISP, "Start hw res failed (type:%d, id:%d)",
+		isp_hw_res->res_type, isp_hw_res->res_id);
+	return rc;
+}
+
+static void cam_ife_hw_mgr_stop_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	struct cam_hw_intf      *hw_intf;
+	uint32_t dummy_args;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.stop)
+			hw_intf->hw_ops.stop(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+		else
+			CAM_ERR(CAM_ISP, "stop null");
+		if (hw_intf->hw_ops.process_cmd &&
+			isp_hw_res->res_type == CAM_IFE_HW_MGR_RES_IFE_OUT) {
+			hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
+				&dummy_args, sizeof(dummy_args));
+		}
+	}
+}
+
+static void cam_ife_hw_mgr_deinit_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int i;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.deinit)
+			hw_intf->hw_ops.deinit(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+	}
+}
+
+static int cam_ife_hw_mgr_init_hw(
+	struct cam_ife_hw_mgr_ctx *ctx)
+{
+	struct cam_ife_hw_mgr_res *hw_mgr_res;
+	int rc = 0, i;
+
+	CAM_DBG(CAM_ISP, "INIT IFE CID ... in ctx id:%d",
+		ctx->ctx_index);
+	/* INIT IFE CID */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CID(id :%d)",
+				 hw_mgr_res->res_id);
+			return rc;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "INIT IFE csid ... in ctx id:%d",
+		ctx->ctx_index);
+
+	/* INIT IFE csid */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not INIT IFE CSID(id :%d)",
+				 hw_mgr_res->res_id);
+			return rc;
+		}
+	}
+
+	/* INIT IFE SRC */
+	CAM_DBG(CAM_ISP, "INIT IFE SRC in ctx id:%d",
+		ctx->ctx_index);
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		rc = cam_ife_hw_mgr_init_hw_res(hw_mgr_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not INIT IFE SRC (%d)",
+				 hw_mgr_res->res_id);
+			return rc;
+		}
+	}
+
+	/* INIT IFE OUT */
+	CAM_DBG(CAM_ISP, "INIT IFE OUT RESOURCES in ctx id:%d",
+		ctx->ctx_index);
+
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		rc = cam_ife_hw_mgr_init_hw_res(&ctx->res_list_ife_out[i]);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not INIT IFE OUT (%d)",
+				 ctx->res_list_ife_out[i].res_id);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static void cam_ife_hw_mgr_deinit_hw(
+	struct cam_ife_hw_mgr_ctx *ctx)
+{
+	struct cam_ife_hw_mgr_res *hw_mgr_res;
+	int i = 0;
+
+	if (!ctx->init_done) {
+		CAM_WARN(CAM_ISP, "ctx is not in init state");
+		return;
+	}
+
+	/* Deinit IFE CID */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+		CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CID\n", __func__);
+		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	/* Deinit IFE CSID */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		CAM_DBG(CAM_ISP, "%s: Going to DeInit IFE CSID\n", __func__);
+		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	/* Deinit IFE MUX(SRC) */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_deinit_hw_res(hw_mgr_res);
+	}
+
+	/* Deinit IFE OUT */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_deinit_hw_res(&ctx->res_list_ife_out[i]);
+
+	ctx->init_done = false;
+}
+
+static int cam_ife_hw_mgr_put_res(
+	struct list_head                *src_list,
+	struct cam_ife_hw_mgr_res      **res)
+{
+	int rc                              = 0;
+	struct cam_ife_hw_mgr_res *res_ptr  = NULL;
+
+	res_ptr = *res;
+	if (res_ptr)
+		list_add_tail(&res_ptr->list, src_list);
+
+	return rc;
+}
+
+static int cam_ife_hw_mgr_get_res(
+	struct list_head                *src_list,
+	struct cam_ife_hw_mgr_res      **res)
+{
+	int rc = 0;
+	struct cam_ife_hw_mgr_res *res_ptr  = NULL;
+
+	if (!list_empty(src_list)) {
+		res_ptr = list_first_entry(src_list,
+			struct cam_ife_hw_mgr_res, list);
+		list_del_init(&res_ptr->list);
+	} else {
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
+		rc = -1;
+	}
+	*res = res_ptr;
+
+	return rc;
+}
+
+static int cam_ife_hw_mgr_free_hw_res(
+	struct cam_ife_hw_mgr_res   *isp_hw_res)
+{
+	int rc = 0;
+	int i;
+	struct cam_hw_intf      *hw_intf;
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!isp_hw_res->hw_res[i])
+			continue;
+		hw_intf = isp_hw_res->hw_res[i]->hw_intf;
+		if (hw_intf->hw_ops.release) {
+			rc = hw_intf->hw_ops.release(hw_intf->hw_priv,
+				isp_hw_res->hw_res[i],
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"Release hw resrouce id %d failed",
+					isp_hw_res->res_id);
+			isp_hw_res->hw_res[i] = NULL;
+		} else
+			CAM_ERR(CAM_ISP, "Release null");
+	}
+	/* caller should make sure the resource is in a list */
+	list_del_init(&isp_hw_res->list);
+	memset(isp_hw_res, 0, sizeof(*isp_hw_res));
+	INIT_LIST_HEAD(&isp_hw_res->list);
+
+	return 0;
+}
+
+static int cam_ife_mgr_csid_stop_hw(
+	struct cam_ife_hw_mgr_ctx *ctx, struct list_head  *stop_list,
+		uint32_t  base_idx, uint32_t stop_cmd)
+{
+	struct cam_ife_hw_mgr_res      *hw_mgr_res;
+	struct cam_isp_resource_node   *isp_res;
+	struct cam_isp_resource_node   *stop_res[CAM_IFE_PIX_PATH_RES_MAX - 1];
+	struct cam_csid_hw_stop_args    stop;
+	struct cam_hw_intf             *hw_intf;
+	uint32_t i, cnt;
+
+	cnt = 0;
+	list_for_each_entry(hw_mgr_res, stop_list, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			isp_res = hw_mgr_res->hw_res[i];
+			if (isp_res->hw_intf->hw_idx != base_idx)
+				continue;
+			CAM_DBG(CAM_ISP, "base_idx %d res_id %d cnt %u",
+				base_idx, isp_res->res_id, cnt);
+			stop_res[cnt] = isp_res;
+			cnt++;
+		}
+	}
+
+	if (cnt) {
+		hw_intf =  stop_res[0]->hw_intf;
+		stop.num_res = cnt;
+		stop.node_res = stop_res;
+		stop.stop_cmd = stop_cmd;
+		hw_intf->hw_ops.stop(hw_intf->hw_priv, &stop, sizeof(stop));
+	}
+
+	return 0;
+}
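+
+/*
+ * Caller sketch (illustrative; the halt-mode name is assumed from the CSID
+ * interface, not defined in this hunk): a graceful stream-off stops the CSID
+ * at a frame boundary, e.g.
+ *
+ *	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid, base_idx,
+ *		CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+ */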
+
+static int cam_ife_hw_mgr_release_hw_for_ctx(
+	struct cam_ife_hw_mgr_ctx  *ife_ctx)
+{
+	uint32_t                          i;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res_temp;
+
+	/* ife leaf resource */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_out[i]);
+
+	/* ife source resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&ife_ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* ife csid resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&ife_ctx->res_list_ife_csid, list) {
+		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* ife cid resource */
+	list_for_each_entry_safe(hw_mgr_res, hw_mgr_res_temp,
+		&ife_ctx->res_list_ife_cid, list) {
+		cam_ife_hw_mgr_free_hw_res(hw_mgr_res);
+		cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &hw_mgr_res);
+	}
+
+	/* ife root node */
+	if (ife_ctx->res_list_ife_in.res_type != CAM_IFE_HW_MGR_RES_UNINIT)
+		cam_ife_hw_mgr_free_hw_res(&ife_ctx->res_list_ife_in);
+
+	/* clean up the callback function */
+	ife_ctx->common.cb_priv = NULL;
+	memset(ife_ctx->common.event_cb, 0, sizeof(ife_ctx->common.event_cb));
+
+	CAM_DBG(CAM_ISP, "release context completed ctx id:%d",
+		ife_ctx->ctx_index);
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_put_ctx(
+	struct list_head                 *src_list,
+	struct cam_ife_hw_mgr_ctx       **ife_ctx)
+{
+	int rc                              = 0;
+	struct cam_ife_hw_mgr_ctx *ctx_ptr  = NULL;
+
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	ctx_ptr = *ife_ctx;
+	if (ctx_ptr)
+		list_add_tail(&ctx_ptr->list, src_list);
+	*ife_ctx = NULL;
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+	return rc;
+}
+
+static int cam_ife_hw_mgr_get_ctx(
+	struct list_head                *src_list,
+	struct cam_ife_hw_mgr_ctx       **ife_ctx)
+{
+	int rc                              = 0;
+	struct cam_ife_hw_mgr_ctx *ctx_ptr  = NULL;
+
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	if (!list_empty(src_list)) {
+		ctx_ptr = list_first_entry(src_list,
+			struct cam_ife_hw_mgr_ctx, list);
+		list_del_init(&ctx_ptr->list);
+	} else {
+		CAM_ERR(CAM_ISP, "No more free ife hw mgr ctx");
+		rc = -1;
+	}
+	*ife_ctx = ctx_ptr;
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
+static void cam_ife_mgr_add_base_info(
+	struct cam_ife_hw_mgr_ctx       *ctx,
+	enum cam_isp_hw_split_id         split_id,
+	uint32_t                         base_idx)
+{
+	uint32_t    i;
+
+	if (!ctx->num_base) {
+		ctx->base[0].split_id = split_id;
+		ctx->base[0].idx      = base_idx;
+		ctx->num_base++;
+		CAM_DBG(CAM_ISP,
+			"Add split id = %d for base idx = %d num_base=%d",
+			split_id, base_idx, ctx->num_base);
+	} else {
+		/* Check if base index already exists in the list */
+		for (i = 0; i < ctx->num_base; i++) {
+			if (ctx->base[i].idx == base_idx) {
+				if (split_id != CAM_ISP_HW_SPLIT_MAX &&
+					ctx->base[i].split_id ==
+						CAM_ISP_HW_SPLIT_MAX)
+					ctx->base[i].split_id = split_id;
+
+				break;
+			}
+		}
+
+		if (i == ctx->num_base) {
+			ctx->base[ctx->num_base].split_id = split_id;
+			ctx->base[ctx->num_base].idx      = base_idx;
+			ctx->num_base++;
+			CAM_DBG(CAM_ISP,
+				"Add split_id=%d for base idx=%d num_base=%d",
+				 split_id, base_idx, ctx->num_base);
+		}
+	}
+}
+
+static int cam_ife_mgr_process_base_info(
+	struct cam_ife_hw_mgr_ctx        *ctx)
+{
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_isp_resource_node     *res = NULL;
+	uint32_t i;
+
+	if (list_empty(&ctx->res_list_ife_src)) {
+		CAM_ERR(CAM_ISP, "Mux List empty");
+		return -ENODEV;
+	}
+
+	/* IFE mux in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+			cam_ife_mgr_add_base_info(ctx, i,
+					res->hw_intf->hw_idx);
+			CAM_DBG(CAM_ISP, "add base info for hw %d",
+				res->hw_intf->hw_idx);
+		}
+	}
+	CAM_DBG(CAM_ISP, "ctx base num = %d", ctx->num_base);
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_out_rdi(
+	struct cam_ife_hw_mgr_ctx       *ife_ctx,
+	struct cam_ife_hw_mgr_res       *ife_src_res,
+	struct cam_isp_in_port_info     *in_port)
+{
+	int rc = -EINVAL;
+	struct cam_vfe_acquire_args               vfe_acquire;
+	struct cam_isp_out_port_info             *out_port = NULL;
+	struct cam_ife_hw_mgr_res                *ife_out_res;
+	struct cam_hw_intf                       *hw_intf;
+	uint32_t  i, vfe_out_res_id, vfe_in_res_id;
+
+	/* take left resource */
+	vfe_in_res_id = ife_src_res->hw_res[0]->res_id;
+
+	switch (vfe_in_res_id) {
+	case CAM_ISP_HW_VFE_IN_RDI0:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_0;
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI1:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_1;
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI2:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_2;
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		vfe_out_res_id = CAM_ISP_IFE_OUT_RES_RDI_3;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "invalid resource type");
+		goto err;
+	}
+	CAM_DBG(CAM_ISP, "vfe_in_res_id = %d, vfe_out_red_id = %d",
+		vfe_in_res_id, vfe_out_res_id);
+
+	vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
+	vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+
+	ife_out_res = &ife_ctx->res_list_ife_out[vfe_out_res_id & 0xFF];
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+
+		CAM_DBG(CAM_ISP, "i = %d, vfe_out_res_id = %d, out_port: %d",
+			i, vfe_out_res_id, out_port->res_type);
+
+		if (vfe_out_res_id != out_port->res_type)
+			continue;
+
+		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.vfe_out.out_port_info = out_port;
+		vfe_acquire.vfe_out.split_id = CAM_ISP_HW_SPLIT_LEFT;
+		vfe_acquire.vfe_out.unique_id = ife_ctx->ctx_index;
+		vfe_acquire.vfe_out.is_dual = 0;
+		hw_intf = ife_src_res->hw_res[0]->hw_intf;
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+			&vfe_acquire,
+			sizeof(struct cam_vfe_acquire_args));
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not acquire out resource 0x%x",
+				 out_port->res_type);
+			goto err;
+		}
+		break;
+	}
+
+	if (i == in_port->num_out_res) {
+		CAM_ERR(CAM_ISP,
+			"Cannot acquire out resource, i=%d, num_out_res=%d",
+			i, in_port->num_out_res);
+		goto err;
+	}
+
+	ife_out_res->hw_res[0] = vfe_acquire.vfe_out.rsrc_node;
+	ife_out_res->is_dual_vfe = 0;
+	ife_out_res->res_id = vfe_out_res_id;
+	ife_out_res->res_type = (enum cam_ife_hw_mgr_res_type)
+		CAM_ISP_RESOURCE_VFE_OUT;
+	ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
+
+	return 0;
+err:
+	return rc;
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_out_pixel(
+	struct cam_ife_hw_mgr_ctx       *ife_ctx,
+	struct cam_ife_hw_mgr_res       *ife_src_res,
+	struct cam_isp_in_port_info     *in_port)
+{
+	int rc = -1;
+	uint32_t  i, j, k;
+	struct cam_vfe_acquire_args               vfe_acquire;
+	struct cam_isp_out_port_info             *out_port;
+	struct cam_ife_hw_mgr_res                *ife_out_res;
+	struct cam_hw_intf                       *hw_intf;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		k = out_port->res_type & 0xFF;
+		if (k >= CAM_IFE_HW_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "invalid output resource type 0x%x",
+				 out_port->res_type);
+			continue;
+		}
+
+		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
+			continue;
+
+		CAM_DBG(CAM_ISP, "res_type 0x%x",
+			 out_port->res_type);
+
+		ife_out_res = &ife_ctx->res_list_ife_out[k];
+		ife_out_res->is_dual_vfe = in_port->usage_type;
+
+		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_OUT;
+		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+		vfe_acquire.vfe_out.cdm_ops = ife_ctx->cdm_ops;
+		vfe_acquire.vfe_out.ctx = ife_ctx;
+		vfe_acquire.vfe_out.out_port_info =  out_port;
+		vfe_acquire.vfe_out.is_dual       = ife_src_res->is_dual_vfe;
+		vfe_acquire.vfe_out.unique_id     = ife_ctx->ctx_index;
+
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!ife_src_res->hw_res[j])
+				continue;
+
+			hw_intf = ife_src_res->hw_res[j]->hw_intf;
+
+			if (j == CAM_ISP_HW_SPLIT_LEFT) {
+				vfe_acquire.vfe_out.split_id  =
+					CAM_ISP_HW_SPLIT_LEFT;
+				if (ife_src_res->is_dual_vfe) {
+					/*TBD */
+					vfe_acquire.vfe_out.is_master     = 1;
+					vfe_acquire.vfe_out.dual_slave_core =
+						(hw_intf->hw_idx == 0) ? 1 : 0;
+				} else {
+					vfe_acquire.vfe_out.is_master   = 0;
+					vfe_acquire.vfe_out.dual_slave_core =
+						0;
+				}
+			} else {
+				vfe_acquire.vfe_out.split_id  =
+					CAM_ISP_HW_SPLIT_RIGHT;
+				vfe_acquire.vfe_out.is_master       = 0;
+				vfe_acquire.vfe_out.dual_slave_core =
+					(hw_intf->hw_idx == 0) ? 1 : 0;
+			}
+
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&vfe_acquire,
+				sizeof(struct cam_vfe_acquire_args));
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Can not acquire out resource 0x%x",
+					out_port->res_type);
+				goto err;
+			}
+
+			ife_out_res->hw_res[j] =
+				vfe_acquire.vfe_out.rsrc_node;
+			CAM_DBG(CAM_ISP, "resource type :0x%x res id:0x%x",
+				ife_out_res->hw_res[j]->res_type,
+				ife_out_res->hw_res[j]->res_id);
+
+		}
+		ife_out_res->res_type =
+			(enum cam_ife_hw_mgr_res_type)CAM_ISP_RESOURCE_VFE_OUT;
+		ife_out_res->res_id = out_port->res_type;
+		ife_out_res->parent = ife_src_res;
+		ife_src_res->child[ife_src_res->num_children++] = ife_out_res;
+	}
+
+	return 0;
+err:
+	/* release resource at the entry function */
+	return rc;
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_out(
+	struct cam_ife_hw_mgr_ctx     *ife_ctx,
+	struct cam_isp_in_port_info   *in_port)
+{
+	int rc = -EINVAL;
+	struct cam_ife_hw_mgr_res       *ife_src_res;
+
+	list_for_each_entry(ife_src_res, &ife_ctx->res_list_ife_src, list) {
+		if (ife_src_res->num_children)
+			continue;
+
+		switch (ife_src_res->res_id) {
+		case CAM_ISP_HW_VFE_IN_CAMIF:
+		case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
+			rc = cam_ife_hw_mgr_acquire_res_ife_out_pixel(ife_ctx,
+				ife_src_res, in_port);
+			break;
+		case CAM_ISP_HW_VFE_IN_RDI0:
+		case CAM_ISP_HW_VFE_IN_RDI1:
+		case CAM_ISP_HW_VFE_IN_RDI2:
+		case CAM_ISP_HW_VFE_IN_RDI3:
+			rc = cam_ife_hw_mgr_acquire_res_ife_out_rdi(ife_ctx,
+				ife_src_res, in_port);
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Unknown IFE SRC resource: %d",
+				ife_src_res->res_id);
+			break;
+		}
+		if (rc)
+			goto err;
+	}
+
+	return 0;
+err:
+	/* release resource on entry function */
+	return rc;
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_src(
+	struct cam_ife_hw_mgr_ctx     *ife_ctx,
+	struct cam_isp_in_port_info   *in_port)
+{
+	int rc                = -1;
+	int i;
+	struct cam_ife_hw_mgr_res                  *csid_res;
+	struct cam_ife_hw_mgr_res                  *ife_src_res;
+	struct cam_vfe_acquire_args                 vfe_acquire;
+	struct cam_hw_intf                         *hw_intf;
+	struct cam_ife_hw_mgr                      *ife_hw_mgr;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	list_for_each_entry(csid_res, &ife_ctx->res_list_ife_csid, list) {
+		if (csid_res->num_children)
+			continue;
+
+		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
+			&ife_src_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+			goto err;
+		}
+		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_src,
+			&ife_src_res);
+
+		vfe_acquire.rsrc_type = CAM_ISP_RESOURCE_VFE_IN;
+		vfe_acquire.tasklet = ife_ctx->common.tasklet_info;
+		vfe_acquire.vfe_in.cdm_ops = ife_ctx->cdm_ops;
+		vfe_acquire.vfe_in.in_port = in_port;
+
+		switch (csid_res->res_id) {
+		case CAM_IFE_PIX_PATH_RES_IPP:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_CAMIF;
+			if (csid_res->is_dual_vfe)
+				vfe_acquire.vfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_MASTER;
+			else
+				vfe_acquire.vfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_NONE;
+
+			break;
+		case CAM_IFE_PIX_PATH_RES_PPP:
+			vfe_acquire.vfe_in.res_id =
+				CAM_ISP_HW_VFE_IN_CAMIF_LITE;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_0:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI0;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_1:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI1;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_2:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI2;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		case CAM_IFE_PIX_PATH_RES_RDI_3:
+			vfe_acquire.vfe_in.res_id = CAM_ISP_HW_VFE_IN_RDI3;
+			vfe_acquire.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Wrong IFE CSID Resource Node");
+			goto err;
+		}
+		ife_src_res->res_type =
+			(enum cam_ife_hw_mgr_res_type)vfe_acquire.rsrc_type;
+		ife_src_res->res_id = vfe_acquire.vfe_in.res_id;
+		ife_src_res->is_dual_vfe = csid_res->is_dual_vfe;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!csid_res->hw_res[i])
+				continue;
+
+			hw_intf = ife_hw_mgr->ife_devices[
+				csid_res->hw_res[i]->hw_intf->hw_idx];
+
+			/* fill in more acquire information as needed */
+			/* slave CAMIF resource */
+			if (i == CAM_ISP_HW_SPLIT_RIGHT &&
+				ife_src_res->is_dual_vfe)
+				vfe_acquire.vfe_in.sync_mode =
+				CAM_ISP_HW_SYNC_SLAVE;
+
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+					&vfe_acquire,
+					sizeof(struct cam_vfe_acquire_args));
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Can not acquire IFE HW res %d",
+					csid_res->res_id);
+				goto err;
+			}
+			ife_src_res->hw_res[i] = vfe_acquire.vfe_in.rsrc_node;
+			CAM_DBG(CAM_ISP,
+				"acquire success IFE:%d  res type :0x%x res id:0x%x",
+				hw_intf->hw_idx,
+				ife_src_res->hw_res[i]->res_type,
+				ife_src_res->hw_res[i]->res_id);
+
+		}
+
+		/*
+		 * There should be a one-to-one mapping between a csid
+		 * resource and an ife source resource
+		 */
+		ife_src_res->parent = csid_res;
+		csid_res->child[csid_res->num_children++] = ife_src_res;
+		CAM_DBG(CAM_ISP, "csid_res=%d  num_children=%d ife_src_res=%d",
+			csid_res->res_id, csid_res->num_children,
+			ife_src_res->res_id);
+	}
+
+	return 0;
+err:
+	/* release resource at the entry function */
+	return rc;
+}
+
+static int cam_ife_mgr_acquire_cid_res(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port,
+	struct cam_ife_hw_mgr_res         **cid_res,
+	enum cam_ife_pix_path_res_id        csid_path)
+{
+	int rc = -1;
+	int i, j;
+	struct cam_ife_hw_mgr               *ife_hw_mgr;
+	struct cam_hw_intf                  *hw_intf;
+	struct cam_ife_hw_mgr_res           *cid_res_temp, *cid_res_iterator;
+	struct cam_csid_hw_reserve_resource_args  csid_acquire;
+	uint32_t acquired_cnt = 0;
+	struct cam_isp_out_port_info        *out_port = NULL;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+	*cid_res = NULL;
+
+	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, cid_res);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+		goto end;
+	}
+
+	cid_res_temp = *cid_res;
+
+	csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+	csid_acquire.in_port = in_port;
+	csid_acquire.res_id =  csid_path;
+	CAM_DBG(CAM_ISP, "path %d", csid_path);
+
+	if (in_port->num_out_res)
+		out_port = &(in_port->data[0]);
+
+	/* Try acquiring CID resource from previously acquired HW */
+	list_for_each_entry(cid_res_iterator, &ife_ctx->res_list_ife_cid,
+		list) {
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!cid_res_iterator->hw_res[i])
+				continue;
+
+			if (cid_res_iterator->is_secure == 1 ||
+				(cid_res_iterator->is_secure == 0 &&
+				in_port->num_out_res &&
+				out_port->secure_mode == 1))
+				continue;
+
+			hw_intf = cid_res_iterator->hw_res[i]->hw_intf;
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (rc) {
+				CAM_DBG(CAM_ISP,
+					"No ife cid resource from hw %d",
+					hw_intf->hw_idx);
+				continue;
+			}
+
+			cid_res_temp->hw_res[acquired_cnt++] =
+				csid_acquire.node_res;
+
+			CAM_DBG(CAM_ISP,
+				"acquired csid(%s)=%d CID rsrc successfully",
+				(i == 0) ? "left" : "right",
+				hw_intf->hw_idx);
+
+			if (in_port->usage_type && acquired_cnt == 1 &&
+				csid_path == CAM_IFE_PIX_PATH_RES_IPP)
+				/*
+				 * Continue to acquire Right for IPP.
+				 * Dual IFE for RDI and PPP is not currently
+				 * supported.
+				 */
+
+				continue;
+
+			if (acquired_cnt)
+				/*
+				 * If successfully acquired CID from
+				 * previously acquired HW, skip the next
+				 * part
+				 */
+				goto acquire_successful;
+		}
+	}
+
+	/* Acquire Left if not already acquired */
+	for (i = CAM_IFE_CSID_HW_NUM_MAX - 1; i >= 0; i--) {
+		if (!ife_hw_mgr->csid_devices[i])
+			continue;
+
+		hw_intf = ife_hw_mgr->csid_devices[i];
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv, &csid_acquire,
+			sizeof(csid_acquire));
+		if (rc)
+			continue;
+
+		cid_res_temp->hw_res[acquired_cnt++] =
+			csid_acquire.node_res;
+		break;
+	}
+
+	if (i == -1 || !csid_acquire.node_res) {
+		CAM_ERR(CAM_ISP, "Can not acquire ife cid resource for path %d",
+			csid_path);
+		goto put_res;
+	}
+
+acquire_successful:
+	CAM_DBG(CAM_ISP, "CID left acquired success is_dual %d",
+		in_port->usage_type);
+
+	cid_res_temp->res_type = CAM_IFE_HW_MGR_RES_CID;
+	/* CID(DT_ID) value of acquire device, require for path */
+	cid_res_temp->res_id = csid_acquire.node_res->res_id;
+	cid_res_temp->is_dual_vfe = in_port->usage_type;
+
+	if (in_port->num_out_res)
+		cid_res_temp->is_secure = out_port->secure_mode;
+
+	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_cid, cid_res);
+
+	/*
+	 * Acquire Right if not already acquired.
+	 * Dual IFE for RDI and PPP is not currently supported.
+	 */
+	if (cid_res_temp->is_dual_vfe && csid_path
+		== CAM_IFE_PIX_PATH_RES_IPP && acquired_cnt == 1) {
+		csid_acquire.node_res = NULL;
+		csid_acquire.res_type = CAM_ISP_RESOURCE_CID;
+		csid_acquire.in_port = in_port;
+		for (j = 0; j < CAM_IFE_CSID_HW_NUM_MAX; j++) {
+			if (!ife_hw_mgr->csid_devices[j])
+				continue;
+
+			if (j == cid_res_temp->hw_res[0]->hw_intf->hw_idx)
+				continue;
+
+			hw_intf = ife_hw_mgr->csid_devices[j];
+			rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+				&csid_acquire, sizeof(csid_acquire));
+			if (!rc)
+				break;
+		}
+
+		if (j == CAM_IFE_CSID_HW_NUM_MAX) {
+			CAM_ERR(CAM_ISP,
+				"Can not acquire ife csid rdi resource");
+			goto end;
+		}
+		cid_res_temp->hw_res[1] = csid_acquire.node_res;
+		CAM_DBG(CAM_ISP, "CID right acquired success is_dual %d",
+			in_port->usage_type);
+	}
+	cid_res_temp->parent = &ife_ctx->res_list_ife_in;
+	ife_ctx->res_list_ife_in.child[
+		ife_ctx->res_list_ife_in.num_children++] = cid_res_temp;
+
+	return 0;
+put_res:
+	cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, cid_res);
+end:
+	return rc;
+}
+
+static int cam_ife_hw_mgr_acquire_res_ife_csid_pxl(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port,
+	bool                                is_ipp)
+{
+	int rc = -1;
+	int i;
+	int master_idx = -1;
+
+	struct cam_ife_hw_mgr                    *ife_hw_mgr;
+	struct cam_ife_hw_mgr_res                *csid_res;
+	struct cam_ife_hw_mgr_res                *cid_res;
+	struct cam_hw_intf                       *hw_intf;
+	struct cam_csid_hw_reserve_resource_args  csid_acquire;
+	enum cam_ife_pix_path_res_id              path_res_id;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+	/* get cid resource */
+	if (is_ipp)
+		path_res_id = CAM_IFE_PIX_PATH_RES_IPP;
+	else
+		path_res_id = CAM_IFE_PIX_PATH_RES_PPP;
+
+	rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
+		path_res_id);
+
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+		goto end;
+	}
+
+	rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list, &csid_res);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+		goto end;
+	}
+
+	csid_res->res_type =
+		(enum cam_ife_hw_mgr_res_type)CAM_ISP_RESOURCE_PIX_PATH;
+
+	csid_res->res_id = path_res_id;
+
+	if (in_port->usage_type && is_ipp) {
+		csid_res->is_dual_vfe = 1;
+	} else {
+		csid_res->is_dual_vfe = 0;
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+	}
+
+	/* Pixel path (IPP/PPP) resource must be from same HW as CID resource */
+	for (i = 0; i <= csid_res->is_dual_vfe; i++) {
+		CAM_DBG(CAM_ISP, "i %d is_dual %d", i, csid_res->is_dual_vfe);
+
+		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		csid_acquire.res_id = path_res_id;
+		csid_acquire.cid = cid_res->hw_res[i]->res_id;
+		csid_acquire.in_port = in_port;
+		csid_acquire.out_port = in_port->data;
+		csid_acquire.node_res = NULL;
+
+		hw_intf = cid_res->hw_res[i]->hw_intf;
+
+		if (csid_res->is_dual_vfe) {
+			if (i == CAM_ISP_HW_SPLIT_LEFT) {
+				master_idx = hw_intf->hw_idx;
+				csid_acquire.sync_mode =
+					CAM_ISP_HW_SYNC_MASTER;
+			} else {
+				if (master_idx == -1) {
+					CAM_ERR(CAM_ISP,
+						"No Master found");
+					goto put_res;
+				}
+				csid_acquire.sync_mode =
+					CAM_ISP_HW_SYNC_SLAVE;
+				csid_acquire.master_idx = master_idx;
+			}
+		}
+
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+			&csid_acquire, sizeof(csid_acquire));
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Cannot acquire ife csid pxl path rsrc %s",
+				(is_ipp) ? "IPP" : "PPP");
+			goto put_res;
+		}
+
+		csid_res->hw_res[i] = csid_acquire.node_res;
+		CAM_DBG(CAM_ISP,
+			"acquired csid(%s)=%d pxl path rsrc %s successfully",
+			(i == 0) ? "left" : "right", hw_intf->hw_idx,
+			(is_ipp) ? "IPP" : "PPP");
+	}
+	cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
+
+	csid_res->parent = cid_res;
+	cid_res->child[cid_res->num_children++] = csid_res;
+
+	CAM_DBG(CAM_ISP, "acquire res %d", csid_acquire.res_id);
+
+	return 0;
+put_res:
+	cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
+end:
+	return rc;
+}
+
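+/* Map an RDI output resource type to the corresponding CSID RDI path */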
+static enum cam_ife_pix_path_res_id
+	cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+	uint32_t                 out_port_type)
+{
+	enum cam_ife_pix_path_res_id path_id;
+
+	switch (out_port_type) {
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+		path_id = CAM_IFE_PIX_PATH_RES_RDI_0;
+		break;
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+		path_id = CAM_IFE_PIX_PATH_RES_RDI_1;
+		break;
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+		path_id = CAM_IFE_PIX_PATH_RES_RDI_2;
+		break;
+	case CAM_ISP_IFE_OUT_RES_RDI_3:
+		path_id = CAM_IFE_PIX_PATH_RES_RDI_3;
+		break;
+	default:
+		path_id = CAM_IFE_PIX_PATH_RES_MAX;
+		CAM_DBG(CAM_ISP, "maximum rdi output type exceeded");
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "out_port %d path_id %d", out_port_type, path_id);
+
+	return path_id;
+}
+
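+/*
+ * Acquire a CID plus an RDI path from the CSID for every RDI output port
+ * in the in_port descriptor. RDI paths are never dual-VFE.
+ */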
+static int cam_ife_hw_mgr_acquire_res_ife_csid_rdi(
+	struct cam_ife_hw_mgr_ctx     *ife_ctx,
+	struct cam_isp_in_port_info   *in_port)
+{
+	int rc = -EINVAL;
+	int i;
+
+	struct cam_ife_hw_mgr               *ife_hw_mgr;
+	struct cam_ife_hw_mgr_res           *csid_res;
+	struct cam_ife_hw_mgr_res           *cid_res;
+	struct cam_hw_intf                  *hw_intf;
+	struct cam_isp_out_port_info        *out_port;
+	struct cam_csid_hw_reserve_resource_args  csid_acquire;
+	enum cam_ife_pix_path_res_id         path_type;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		path_type = cam_ife_hw_mgr_get_ife_csid_rdi_res_type(
+			out_port->res_type);
+		if (path_type == CAM_IFE_PIX_PATH_RES_MAX)
+			continue;
+
+		/* get cid resource */
+		rc = cam_ife_mgr_acquire_cid_res(ife_ctx, in_port, &cid_res,
+			path_type);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Acquire IFE CID resource Failed");
+			goto end;
+		}
+
+		/* For each RDI we need CID + PATH resource */
+		rc = cam_ife_hw_mgr_get_res(&ife_ctx->free_res_list,
+			&csid_res);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "No more free hw mgr resource");
+			goto end;
+		}
+
+		memset(&csid_acquire, 0, sizeof(csid_acquire));
+		csid_acquire.res_id = path_type;
+
+		csid_acquire.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		csid_acquire.cid = cid_res->hw_res[0]->res_id;
+		csid_acquire.in_port = in_port;
+		csid_acquire.out_port = out_port;
+		csid_acquire.sync_mode = CAM_ISP_HW_SYNC_NONE;
+		csid_acquire.node_res = NULL;
+
+		hw_intf = cid_res->hw_res[0]->hw_intf;
+		rc = hw_intf->hw_ops.reserve(hw_intf->hw_priv,
+			&csid_acquire, sizeof(csid_acquire));
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"CSID Path reserve failed hw=%d rc=%d cid=%d",
+				hw_intf->hw_idx, rc,
+				cid_res->hw_res[0]->res_id);
+
+			goto put_res;
+		}
+
+		if (csid_acquire.node_res == NULL) {
+			CAM_ERR(CAM_ISP, "Acquire CSID RDI rsrc failed");
+
+			goto put_res;
+		}
+
+		csid_res->res_type = (enum cam_ife_hw_mgr_res_type)
+			CAM_ISP_RESOURCE_PIX_PATH;
+		csid_res->res_id = csid_acquire.res_id;
+		csid_res->is_dual_vfe = 0;
+		csid_res->hw_res[0] = csid_acquire.node_res;
+		csid_res->hw_res[1] = NULL;
+		CAM_DBG(CAM_ISP, "acquire res %d",
+			csid_acquire.res_id);
+		csid_res->parent = cid_res;
+		cid_res->child[cid_res->num_children++] =
+			csid_res;
+		cam_ife_hw_mgr_put_res(&ife_ctx->res_list_ife_csid, &csid_res);
+
+	}
+
+	return 0;
+put_res:
+	cam_ife_hw_mgr_put_res(&ife_ctx->free_res_list, &csid_res);
+end:
+	return rc;
+}
+
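+/*
+ * Acquire the root (IFE input) resource for the context. Only one input
+ * resource type is allowed per context; a mismatch on a later acquire is
+ * treated as an error.
+ */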
+static int cam_ife_hw_mgr_acquire_res_root(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port)
+{
+	int rc = -1;
+
+	if (ife_ctx->res_list_ife_in.res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+		/* first acquire */
+		ife_ctx->res_list_ife_in.res_type = CAM_IFE_HW_MGR_RES_ROOT;
+		ife_ctx->res_list_ife_in.res_id = in_port->res_type;
+		ife_ctx->res_list_ife_in.is_dual_vfe = in_port->usage_type;
+	} else if (ife_ctx->res_list_ife_in.res_id != in_port->res_type) {
+		CAM_ERR(CAM_ISP, "No Free resource for this context");
+		goto err;
+	} else {
+		/* else do nothing */
+	}
+	return 0;
+err:
+	/* release resource in entry function */
+	return rc;
+}
+
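+/* Count the IPP, PPP (2PD) and RDI output ports requested by in_port */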
+static int cam_ife_hw_mgr_preprocess_out_port(
+	struct cam_ife_hw_mgr_ctx   *ife_ctx,
+	struct cam_isp_in_port_info *in_port,
+	int                         *ipp_count,
+	int                         *rdi_count,
+	int                         *ppp_count)
+{
+	int ipp_num        = 0;
+	int rdi_num        = 0;
+	int ppp_num        = 0;
+	uint32_t i;
+	struct cam_isp_out_port_info      *out_port;
+	struct cam_ife_hw_mgr             *ife_hw_mgr;
+
+	ife_hw_mgr = ife_ctx->hw_mgr;
+
+	for (i = 0; i < in_port->num_out_res; i++) {
+		out_port = &in_port->data[i];
+		if (cam_ife_hw_mgr_is_rdi_res(out_port->res_type))
+			rdi_num++;
+		else if (out_port->res_type == CAM_ISP_IFE_OUT_RES_2PD)
+			ppp_num++;
+		else
+			ipp_num++;
+	}
+
+	*ipp_count = ipp_num;
+	*rdi_count = rdi_num;
+	*ppp_count = ppp_num;
+
+	return 0;
+}
+
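+/*
+ * Acquire all hardware blocks needed for one input: root, CSID IPP/RDI/PPP
+ * paths, IFE source (mux) and IFE output resources, in that order.
+ */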
+static int cam_ife_mgr_acquire_hw_for_ctx(
+	struct cam_ife_hw_mgr_ctx          *ife_ctx,
+	struct cam_isp_in_port_info        *in_port,
+	uint32_t  *num_pix_port, uint32_t  *num_rdi_port)
+{
+	int rc                                    = -1;
+	int is_dual_vfe                           = 0;
+	int ipp_count                             = 0;
+	int rdi_count                             = 0;
+	int ppp_count                             = 0;
+
+	is_dual_vfe = in_port->usage_type;
+
+	/* get root node resource */
+	rc = cam_ife_hw_mgr_acquire_res_root(ife_ctx, in_port);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Can not acquire csid rx resource");
+		goto err;
+	}
+
+	cam_ife_hw_mgr_preprocess_out_port(ife_ctx, in_port,
+		&ipp_count, &rdi_count, &ppp_count);
+
+	if (!ipp_count && !rdi_count && !ppp_count) {
+		CAM_ERR(CAM_ISP, "No PIX or RDI or PPP resource");
+		return -EINVAL;
+	}
+
+	if (ipp_count) {
+		/* get ife csid IPP resource */
+		rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
+			in_port, true);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Acquire IFE CSID IPP resource Failed");
+			goto err;
+		}
+	}
+
+	if (rdi_count) {
+		/* get ife csid rdi resource */
+		rc = cam_ife_hw_mgr_acquire_res_ife_csid_rdi(ife_ctx, in_port);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Acquire IFE CSID RDI resource Failed");
+			goto err;
+		}
+	}
+
+	if (ppp_count) {
+		/* get ife csid PPP resource */
+		rc = cam_ife_hw_mgr_acquire_res_ife_csid_pxl(ife_ctx,
+			in_port, false);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Acquire IFE CSID PPP resource Failed");
+			goto err;
+		}
+	}
+
+	/* get ife src resource */
+	rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx, in_port);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Acquire IFE SRC resource Failed");
+		goto err;
+	}
+
+	rc = cam_ife_hw_mgr_acquire_res_ife_out(ife_ctx, in_port);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Acquire IFE OUT resource Failed");
+		goto err;
+	}
+
+	*num_pix_port += ipp_count + ppp_count;
+	*num_rdi_port += rdi_count;
+
+	return 0;
+err:
+	/* release resource at the acquire entry function */
+	return rc;
+}
+
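+/*
+ * CDM completion callback: signals config_done_complete once the batch of
+ * BLs submitted for a request has been consumed successfully.
+ */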
+void cam_ife_cam_cdm_callback(uint32_t handle, void *userdata,
+	enum cam_cdm_cb_status status, uint64_t cookie)
+{
+	struct cam_ife_hw_mgr_ctx *ctx = NULL;
+
+	if (!userdata) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return;
+	}
+
+	ctx = userdata;
+
+	if (status == CAM_CDM_CB_STATUS_BL_SUCCESS) {
+		complete(&ctx->config_done_complete);
+		CAM_DBG(CAM_ISP,
+			"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+			 handle, userdata, status, cookie);
+	} else {
+		CAM_WARN(CAM_ISP,
+			"Called by CDM hdl=%x, udata=%pK, status=%d, cookie=%llu",
+			 handle, userdata, status, cookie);
+	}
+}
+
+/* entry function: acquire_hw */
+static int cam_ife_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	struct cam_ife_hw_mgr *ife_hw_mgr            = hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args     = acquire_hw_args;
+	int rc                                       = -1;
+	int i, j;
+	struct cam_ife_hw_mgr_ctx         *ife_ctx;
+	struct cam_isp_in_port_info       *in_port = NULL;
+	struct cam_cdm_acquire_data        cdm_acquire;
+	uint32_t                           num_pix_port_per_in = 0;
+	uint32_t                           num_rdi_port_per_in = 0;
+	uint32_t                           total_pix_port = 0;
+	uint32_t                           total_rdi_port = 0;
+	uint32_t                           in_port_length = 0;
+	uint32_t                           total_in_port_length = 0;
+	struct cam_isp_acquire_hw_info    *acquire_hw_info = NULL;
+
+	CAM_DBG(CAM_ISP, "Enter...");
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_ISP, "Nothing to acquire. This seems like an error");
+		return -EINVAL;
+	}
+
+	/* get the ife ctx */
+	rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+	if (rc || !ife_ctx) {
+		CAM_ERR(CAM_ISP, "Get ife hw context failed");
+		goto err;
+	}
+
+	ife_ctx->common.cb_priv = acquire_args->context_data;
+	for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
+		ife_ctx->common.event_cb[i] = acquire_args->event_cb;
+
+	ife_ctx->hw_mgr = ife_hw_mgr;
+
+	memcpy(cdm_acquire.identifier, "ife", sizeof("ife"));
+	cdm_acquire.cell_index = 0;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ife_ctx;
+	cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX;
+	for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		if (ife_hw_mgr->cdm_reg_map[i])
+			cdm_acquire.base_array[j++] =
+				ife_hw_mgr->cdm_reg_map[i];
+	}
+	cdm_acquire.base_array_cnt = j;
+
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
+		goto free_ctx;
+	}
+
+	CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+		cdm_acquire.handle);
+	ife_ctx->cdm_handle = cdm_acquire.handle;
+	ife_ctx->cdm_ops = cdm_acquire.ops;
+
+	acquire_hw_info =
+		(struct cam_isp_acquire_hw_info *)acquire_args->acquire_info;
+	in_port = (struct cam_isp_in_port_info *)
+		((uint8_t *)&acquire_hw_info->data +
+		 acquire_hw_info->input_info_offset);
+
+	/* acquire HW resources */
+	for (i = 0; i < acquire_hw_info->num_inputs; i++) {
+		in_port_length = sizeof(struct cam_isp_in_port_info) +
+			(in_port->num_out_res - 1) *
+			sizeof(struct cam_isp_out_port_info);
+		total_in_port_length += in_port_length;
+
+		if (total_in_port_length > acquire_hw_info->input_info_size) {
+			CAM_ERR(CAM_ISP, "buffer size is not enough");
+			rc = -EINVAL;
+			goto free_res;
+		}
+		rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
+			&num_pix_port_per_in, &num_rdi_port_per_in);
+		total_pix_port += num_pix_port_per_in;
+		total_rdi_port += num_rdi_port_per_in;
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "can not acquire resource");
+			goto free_res;
+		}
+		in_port = (struct cam_isp_in_port_info *)((uint8_t *)in_port +
+			in_port_length);
+	}
+
+	/* Check whether context has only RDI resource */
+	if (!total_pix_port) {
+		ife_ctx->is_rdi_only_context = 1;
+		CAM_DBG(CAM_ISP, "RDI only context");
+	}
+
+	/* Process base info */
+	rc = cam_ife_mgr_process_base_info(ife_ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Process base info failed");
+		goto free_res;
+	}
+
+	acquire_args->ctxt_to_hw_map = ife_ctx;
+	ife_ctx->ctx_in_use = 1;
+
+	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
+
+	CAM_DBG(CAM_ISP, "Exit...(success)");
+
+	return 0;
+free_res:
+	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
+	cam_cdm_release(ife_ctx->cdm_handle);
+free_ctx:
+	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+err:
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
+/* entry function: acquire_dev */
+static int cam_ife_mgr_acquire_dev(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	struct cam_ife_hw_mgr *ife_hw_mgr            = hw_mgr_priv;
+	struct cam_hw_acquire_args *acquire_args     = acquire_hw_args;
+	int rc                                       = -1;
+	int i, j;
+	struct cam_ife_hw_mgr_ctx         *ife_ctx;
+	struct cam_isp_in_port_info       *in_port = NULL;
+	struct cam_isp_resource           *isp_resource = NULL;
+	struct cam_cdm_acquire_data        cdm_acquire;
+	uint32_t                           num_pix_port_per_in = 0;
+	uint32_t                           num_rdi_port_per_in = 0;
+	uint32_t                           total_pix_port = 0;
+	uint32_t                           total_rdi_port = 0;
+	uint32_t                           in_port_length = 0;
+
+	CAM_DBG(CAM_ISP, "Enter...");
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_ISP, "Nothing to acquire. This seems like an error");
+		return -EINVAL;
+	}
+
+	/* get the ife ctx */
+	rc = cam_ife_hw_mgr_get_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+	if (rc || !ife_ctx) {
+		CAM_ERR(CAM_ISP, "Get ife hw context failed");
+		goto err;
+	}
+
+	ife_ctx->common.cb_priv = acquire_args->context_data;
+	for (i = 0; i < CAM_ISP_HW_EVENT_MAX; i++)
+		ife_ctx->common.event_cb[i] = acquire_args->event_cb;
+
+	ife_ctx->hw_mgr = ife_hw_mgr;
+
+	memcpy(cdm_acquire.identifier, "ife", sizeof("ife"));
+	cdm_acquire.cell_index = 0;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = ife_ctx;
+	cdm_acquire.base_array_cnt = CAM_IFE_HW_NUM_MAX;
+	for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		if (ife_hw_mgr->cdm_reg_map[i])
+			cdm_acquire.base_array[j++] =
+				ife_hw_mgr->cdm_reg_map[i];
+	}
+	cdm_acquire.base_array_cnt = j;
+
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.cam_cdm_callback = cam_ife_cam_cdm_callback;
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to acquire the CDM HW");
+		goto free_ctx;
+	}
+
+	CAM_DBG(CAM_ISP, "Successfully acquired the CDM HW hdl=%x",
+		cdm_acquire.handle);
+	ife_ctx->cdm_handle = cdm_acquire.handle;
+	ife_ctx->cdm_ops = cdm_acquire.ops;
+
+	isp_resource = (struct cam_isp_resource *)acquire_args->acquire_info;
+
+	/* acquire HW resources */
+	for (i = 0; i < acquire_args->num_acq; i++) {
+		if (isp_resource[i].resource_id != CAM_ISP_RES_ID_PORT)
+			continue;
+
+		CAM_DBG(CAM_ISP,
+			"start copy from user handle %lld with len = %d",
+			isp_resource[i].res_hdl,
+			isp_resource[i].length);
+
+		in_port_length = sizeof(struct cam_isp_in_port_info);
+
+		if (in_port_length > isp_resource[i].length) {
+			CAM_ERR(CAM_ISP, "buffer size is not enough");
+			rc = -EINVAL;
+			goto free_res;
+		}
+
+		in_port = memdup_user(
+			u64_to_user_ptr(isp_resource[i].res_hdl),
+			isp_resource[i].length);
+		if (!IS_ERR(in_port)) {
+			in_port_length = sizeof(struct cam_isp_in_port_info) +
+				(in_port->num_out_res - 1) *
+				sizeof(struct cam_isp_out_port_info);
+			if (in_port_length > isp_resource[i].length) {
+				CAM_ERR(CAM_ISP, "buffer size is not enough");
+				rc = -EINVAL;
+				kfree(in_port);
+				goto free_res;
+			}
+
+			rc = cam_ife_mgr_acquire_hw_for_ctx(ife_ctx, in_port,
+				&num_pix_port_per_in, &num_rdi_port_per_in);
+			total_pix_port += num_pix_port_per_in;
+			total_rdi_port += num_rdi_port_per_in;
+
+			kfree(in_port);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "can not acquire resource");
+				goto free_res;
+			}
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Copy from user failed with in_port = %pK",
+				in_port);
+			rc = -EFAULT;
+			goto free_res;
+		}
+	}
+
+	/* Check whether context has only RDI resource */
+	if (!total_pix_port) {
+		ife_ctx->is_rdi_only_context = 1;
+		CAM_DBG(CAM_ISP, "RDI only context");
+	}
+
+	/* Process base info */
+	rc = cam_ife_mgr_process_base_info(ife_ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Process base info failed");
+		goto free_res;
+	}
+
+	acquire_args->ctxt_to_hw_map = ife_ctx;
+	ife_ctx->ctx_in_use = 1;
+
+	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->used_ctx_list, &ife_ctx);
+
+	CAM_DBG(CAM_ISP, "Exit...(success)");
+
+	return 0;
+free_res:
+	cam_ife_hw_mgr_release_hw_for_ctx(ife_ctx);
+	cam_cdm_release(ife_ctx->cdm_handle);
+free_ctx:
+	cam_ife_hw_mgr_put_ctx(&ife_hw_mgr->free_ctx_list, &ife_ctx);
+err:
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
+/* entry function: acquire; dispatches to acquire_hw or acquire_dev */
+static int cam_ife_mgr_acquire(void *hw_mgr_priv,
+					void *acquire_hw_args)
+{
+	struct cam_hw_acquire_args *acquire_args     = acquire_hw_args;
+	int rc                                       = -1;
+
+	CAM_DBG(CAM_ISP, "Enter...");
+
+	if (!acquire_args || acquire_args->num_acq <= 0) {
+		CAM_ERR(CAM_ISP, "Nothing to acquire. This seems like an error");
+		return -EINVAL;
+	}
+
+	if (acquire_args->num_acq == CAM_API_COMPAT_CONSTANT)
+		rc = cam_ife_mgr_acquire_hw(hw_mgr_priv, acquire_hw_args);
+	else
+		rc = cam_ife_mgr_acquire_dev(hw_mgr_priv, acquire_hw_args);
+
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
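+/*
+ * Apply the bandwidth votes from a BW config blob to every acquired IFE
+ * input resource: left/right pixel votes for CAMIF and CAMIF-lite, and
+ * per-RDI votes for the RDI paths.
+ */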
+static int cam_isp_blob_bw_update(
+	struct cam_isp_bw_config              *bw_config,
+	struct cam_ife_hw_mgr_ctx             *ctx)
+{
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_vfe_bw_update_args          bw_upd_args;
+	uint64_t                               cam_bw_bps = 0;
+	uint64_t                               ext_bw_bps = 0;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+	bool                                   camif_l_bw_updated = false;
+	bool                                   camif_r_bw_updated = false;
+
+	CAM_DBG(CAM_PERF,
+		"usage=%u left cam_bw_bps=%llu ext_bw_bps=%llu\n"
+		"right cam_bw_bps=%llu ext_bw_bps=%llu",
+		bw_config->usage_type,
+		bw_config->left_pix_vote.cam_bw_bps,
+		bw_config->left_pix_vote.ext_bw_bps,
+		bw_config->right_pix_vote.cam_bw_bps,
+		bw_config->right_pix_vote.ext_bw_bps);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+				if (i == CAM_ISP_HW_SPLIT_LEFT) {
+					if (camif_l_bw_updated)
+						continue;
+
+					cam_bw_bps =
+					bw_config->left_pix_vote.cam_bw_bps;
+					ext_bw_bps =
+					bw_config->left_pix_vote.ext_bw_bps;
+
+					camif_l_bw_updated = true;
+				} else {
+					if (camif_r_bw_updated)
+						continue;
+
+					cam_bw_bps =
+					bw_config->right_pix_vote.cam_bw_bps;
+					ext_bw_bps =
+					bw_config->right_pix_vote.ext_bw_bps;
+
+					camif_r_bw_updated = true;
+				}
+			} else if ((hw_mgr_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0)
+						&& (hw_mgr_res->res_id <=
+						CAM_ISP_HW_VFE_IN_RDI3)) {
+				uint32_t idx = hw_mgr_res->res_id -
+						CAM_ISP_HW_VFE_IN_RDI0;
+				if (idx >= bw_config->num_rdi)
+					continue;
+
+				cam_bw_bps =
+					bw_config->rdi_vote[idx].cam_bw_bps;
+				ext_bw_bps =
+					bw_config->rdi_vote[idx].ext_bw_bps;
+			} else if (hw_mgr_res->res_id ==
+				CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+				if (i == CAM_ISP_HW_SPLIT_LEFT) {
+					if (camif_l_bw_updated)
+						continue;
+
+					cam_bw_bps =
+					bw_config->left_pix_vote.cam_bw_bps;
+					ext_bw_bps =
+					bw_config->left_pix_vote.ext_bw_bps;
+
+					camif_l_bw_updated = true;
+				} else {
+					if (camif_r_bw_updated)
+						continue;
+
+					cam_bw_bps =
+					bw_config->right_pix_vote.cam_bw_bps;
+					ext_bw_bps =
+					bw_config->right_pix_vote.ext_bw_bps;
+
+					camif_r_bw_updated = true;
+				}
+			} else if (hw_mgr_res->hw_res[i]) {
+				CAM_ERR(CAM_ISP, "Invalid res_id %u",
+					hw_mgr_res->res_id);
+				rc = -EINVAL;
+				return rc;
+			}
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				bw_upd_args.node_res =
+					hw_mgr_res->hw_res[i];
+
+				bw_upd_args.camnoc_bw_bytes = cam_bw_bps;
+				bw_upd_args.external_bw_bytes = ext_bw_bps;
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_BW_UPDATE,
+					&bw_upd_args,
+					sizeof(struct cam_vfe_bw_update_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "BW Update failed");
+			} else
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	return rc;
+}
+
+/* entry function: config_hw */
+static int cam_ife_mgr_config_hw(void *hw_mgr_priv,
+					void *config_hw_args)
+{
+	int rc = -1, i;
+	struct cam_hw_config_args *cfg;
+	struct cam_hw_update_entry *cmd;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_ife_hw_mgr_ctx *ctx;
+	struct cam_isp_prepare_hw_update_data *hw_update_data;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_mgr_priv || !config_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	cfg = config_hw_args;
+	ctx = (struct cam_ife_hw_mgr_ctx *)cfg->ctxt_to_hw_map;
+	if (!ctx) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	if (!ctx->ctx_in_use || !ctx->cdm_cmd) {
+		CAM_ERR(CAM_ISP, "Invalid context parameters");
+		return -EPERM;
+	}
+	if (atomic_read(&ctx->overflow_pending))
+		return -EINVAL;
+
+	hw_update_data = (struct cam_isp_prepare_hw_update_data  *) cfg->priv;
+
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		if (hw_update_data->bw_config_valid[i] == true) {
+			rc = cam_isp_blob_bw_update(
+				(struct cam_isp_bw_config *)
+				&hw_update_data->bw_config[i], ctx);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Bandwidth Update Failed");
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Enter ctx id:%d num_hw_upd_entries %d",
+		ctx->ctx_index, cfg->num_hw_update_entries);
+
+	if (cfg->num_hw_update_entries > 0) {
+		cdm_cmd = ctx->cdm_cmd;
+		cdm_cmd->cmd_arrary_count = cfg->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = true;
+		cdm_cmd->userdata = ctx;
+		cdm_cmd->cookie = cfg->request_id;
+
+		for (i = 0; i < cfg->num_hw_update_entries; i++) {
+			cmd = (cfg->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		if (cfg->init_packet)
+			init_completion(&ctx->config_done_complete);
+
+		CAM_DBG(CAM_ISP, "Submit to CDM");
+		rc = cam_cdm_submit_bls(ctx->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Failed to apply the configs");
+			return rc;
+		}
+
+		if (cfg->init_packet) {
+			rc = wait_for_completion_timeout(
+				&ctx->config_done_complete,
+				msecs_to_jiffies(30));
+			if (rc <= 0) {
+				CAM_ERR(CAM_ISP,
+					"config done completion timeout for req_id=%llu rc = %d",
+					cfg->request_id, rc);
+				if (rc == 0)
+					rc = -ETIMEDOUT;
+			} else {
+				rc = 0;
+				CAM_DBG(CAM_ISP,
+					"config done Success for req_id=%llu",
+					cfg->request_id);
+			}
+		}
+	} else {
+		CAM_ERR(CAM_ISP, "No commands to config");
+	}
+	CAM_DBG(CAM_ISP, "Exit");
+
+	return rc;
+}
+
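+/*
+ * Stop sequence used for overflow recovery: halt CIDs and CSID paths
+ * immediately (master first), then stop the IFE mux and output resources
+ * and the context tasklet.
+ */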
+static int cam_ife_mgr_stop_hw_in_overflow(void *stop_hw_args)
+{
+	int                               rc        = 0;
+	struct cam_hw_stop_args          *stop_args = stop_hw_args;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	uint32_t                          i, master_base_idx = 0;
+
+	if (!stop_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
+		ctx->ctx_index);
+
+	if (!ctx->num_base) {
+		CAM_ERR(CAM_ISP, "Number of bases is zero");
+		return -EINVAL;
+	}
+
+	/* get master base index first */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
+			master_base_idx = ctx->base[i].idx;
+			break;
+		}
+	}
+
+	if (i == ctx->num_base)
+		master_base_idx = ctx->base[0].idx;
+
+	/* stop the master CIDs first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+		master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+
+	/* stop rest of the CIDs */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (i == master_base_idx)
+			continue;
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
+	}
+
+	/* stop the master CSID path first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+		master_base_idx, CAM_CSID_HALT_IMMEDIATELY);
+
+	/* Stop rest of the CSID paths */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (i == master_base_idx)
+			continue;
+
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+			ctx->base[i].idx, CAM_CSID_HALT_IMMEDIATELY);
+	}
+
+	/* IFE mux in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
+	}
+
+	/* IFE out resources */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
+
+	/* Stop tasklet for context */
+	cam_tasklet_stop(ctx->common.tasklet_info);
+	CAM_DBG(CAM_ISP, "Exit...ctx id:%d rc :%d",
+		ctx->ctx_index, rc);
+
+	return rc;
+}
+
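+/* Include or exclude the context's IFE source resources from BW voting */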
+static int cam_ife_mgr_bw_control(struct cam_ife_hw_mgr_ctx *ctx,
+	enum cam_vfe_bw_control_action action)
+{
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_vfe_bw_control_args         bw_ctrl_args;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				bw_ctrl_args.node_res =
+					hw_mgr_res->hw_res[i];
+				bw_ctrl_args.action = action;
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_BW_CONTROL,
+					&bw_ctrl_args,
+					sizeof(struct cam_vfe_bw_control_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "BW Control failed");
+			} else
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	return rc;
+}
+
+static int cam_ife_mgr_pause_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_EXCLUDE);
+}
+
+/* entry function: stop_hw */
+static int cam_ife_mgr_stop_hw(void *hw_mgr_priv, void *stop_hw_args)
+{
+	int                               rc        = 0;
+	struct cam_hw_stop_args          *stop_args = stop_hw_args;
+	struct cam_isp_stop_args         *stop_isp;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	enum cam_ife_csid_halt_cmd        csid_halt_type;
+	uint32_t                          i, master_base_idx = 0;
+
+	if (!hw_mgr_priv || !stop_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)stop_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d", ctx->ctx_index);
+	stop_isp = (struct cam_isp_stop_args    *)stop_args->args;
+
+	if ((stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_IMMEDIATELY) &&
+		(stop_isp->stop_only)) {
+		CAM_ERR(CAM_ISP, "Invalid params hw_stop_cmd:%d stop_only:%d",
+			stop_isp->hw_stop_cmd, stop_isp->stop_only);
+		return -EPERM;
+	}
+
+	/* Set the csid halt command */
+	if (stop_isp->hw_stop_cmd == CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY)
+		csid_halt_type = CAM_CSID_HALT_AT_FRAME_BOUNDARY;
+	else
+		csid_halt_type = CAM_CSID_HALT_IMMEDIATELY;
+
+	/* Note:stop resource will remove the irq mask from the hardware */
+
+	if (!ctx->num_base) {
+		CAM_ERR(CAM_ISP, "number of bases is zero");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Halting CSIDs");
+
+	if (cam_cdm_stream_off(ctx->cdm_handle))
+		CAM_ERR(CAM_ISP, "CDM stream off failed %d",
+			ctx->cdm_handle);
+
+	CAM_DBG(CAM_ISP, "Going to stop IFE Out");
+
+	/* IFE out resources */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++)
+		cam_ife_hw_mgr_stop_hw_res(&ctx->res_list_ife_out[i]);
+	/* get master base index first */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].split_id == CAM_ISP_HW_SPLIT_LEFT) {
+			master_base_idx = ctx->base[i].idx;
+			break;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Going to stop IFE Mux");
+
+	/* IFE mux in resources */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		cam_ife_hw_mgr_stop_hw_res(hw_mgr_res);
+	}
+
+	cam_tasklet_stop(ctx->common.tasklet_info);
+
+	/*
+	 * If Context does not have PIX resources and has only RDI resource
+	 * then take the first base index.
+	 */
+	if (i == ctx->num_base)
+		master_base_idx = ctx->base[0].idx;
+	CAM_DBG(CAM_ISP, "Stopping master CSID idx %d", master_base_idx);
+
+	/* Stop the master CSID path first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+		master_base_idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+
+	/* stop rest of the CSID paths  */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].idx == master_base_idx)
+			continue;
+		CAM_DBG(CAM_ISP, "Stopping CSID idx %d i %d master %d",
+			ctx->base[i].idx, i, master_base_idx);
+
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_csid,
+			ctx->base[i].idx, CAM_CSID_HALT_AT_FRAME_BOUNDARY);
+	}
+
+	CAM_DBG(CAM_ISP, "Stopping master CID idx %d", master_base_idx);
+
+	/* Stop the master CIDs first */
+	cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			master_base_idx, csid_halt_type);
+
+	/* stop rest of the CIDs */
+	for (i = 0; i < ctx->num_base; i++) {
+		if (ctx->base[i].idx == master_base_idx)
+			continue;
+		CAM_DBG(CAM_ISP, "Stopping CID idx %d i %d master %d",
+			ctx->base[i].idx, i, master_base_idx);
+		cam_ife_mgr_csid_stop_hw(ctx, &ctx->res_list_ife_cid,
+			ctx->base[i].idx, csid_halt_type);
+	}
+
+	cam_ife_mgr_pause_hw(ctx);
+
+	if (stop_isp->stop_only)
+		goto end;
+
+	cam_ife_hw_mgr_deinit_hw(ctx);
+	CAM_DBG(CAM_ISP,
+		"Stop success for ctx id:%d rc :%d", ctx->ctx_index, rc);
+
+end:
+
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	if (!atomic_dec_return(&g_ife_hw_mgr.active_ctx_cnt)) {
+		rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_DISABLE);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"SAFE SCM call failed:Check TZ/HYP dependency");
+			rc = 0;
+		}
+	}
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
+	return rc;
+}
+
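+/* Issue a HW reset to the VFE core whose index matches hw_idx */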
+static int cam_ife_mgr_reset_vfe_hw(struct cam_ife_hw_mgr *hw_mgr,
+			uint32_t hw_idx)
+{
+	uint32_t i = 0;
+	struct cam_hw_intf             *vfe_hw_intf;
+	uint32_t vfe_reset_type;
+
+	if (!hw_mgr) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+	/* Reset VFE HW */
+	vfe_reset_type = CAM_VFE_HW_RESET_HW;
+
+	for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
+		if (!hw_mgr->ife_devices[i] ||
+			hw_idx != hw_mgr->ife_devices[i]->hw_idx)
+			continue;
+		CAM_DBG(CAM_ISP, "VFE (id = %d) reset", hw_idx);
+		vfe_hw_intf = hw_mgr->ife_devices[i];
+		vfe_hw_intf->hw_ops.reset(vfe_hw_intf->hw_priv,
+			&vfe_reset_type, sizeof(vfe_reset_type));
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit Successfully");
+	return 0;
+}
+
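+/*
+ * Restart sequence after an error: bring the IFE out, IFE mux and CSID
+ * resources back up in that order, falling back to an immediate stop on
+ * failure.
+ */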
+static int cam_ife_mgr_restart_hw(void *start_hw_args)
+{
+	int                               rc = -1;
+	struct cam_hw_start_args         *start_args = start_hw_args;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	uint32_t                          i;
+
+	if (!start_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)start_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d", ctx->ctx_index);
+
+	cam_tasklet_start(ctx->common.tasklet_info);
+
+	/* start the IFE out devices */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		rc = cam_ife_hw_mgr_start_hw_res(
+			&ctx->res_list_ife_out[i], ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)", i);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d", ctx->ctx_index);
+	/* Start the IFE mux in devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d", ctx->ctx_index);
+	/* Start the IFE CSID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d", ctx->ctx_index);
+	/* Start IFE root node: do nothing */
+	CAM_DBG(CAM_ISP, "Exit...(success)");
+	return 0;
+
+err:
+	cam_ife_mgr_stop_hw_in_overflow(start_hw_args);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
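+/*
+ * Start sequence: init HW (unless start_only), stream on CDM, apply the
+ * initial config, then start IFE out, IFE mux, CSID and CID resources
+ * from downstream to upstream.
+ */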
+static int cam_ife_mgr_start_hw(void *hw_mgr_priv, void *start_hw_args)
+{
+	int                               rc = -1;
+	struct cam_isp_start_args        *start_isp = start_hw_args;
+	struct cam_hw_stop_args           stop_args;
+	struct cam_isp_stop_args          stop_isp;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	struct cam_ife_hw_mgr_res        *hw_mgr_res;
+	struct cam_isp_resource_node     *rsrc_node = NULL;
+	uint32_t                          i, camif_debug;
+
+	if (!hw_mgr_priv || !start_isp) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)
+		start_isp->hw_config.ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter... ctx id:%d",
+		ctx->ctx_index);
+
+	/* Bandwidth update should be done at the hw layer */
+
+	cam_tasklet_start(ctx->common.tasklet_info);
+
+	if (ctx->init_done && start_isp->start_only)
+		goto start_only;
+
+	/* set current csid debug information to CSID HW */
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (g_ife_hw_mgr.csid_devices[i])
+			rc = g_ife_hw_mgr.csid_devices[i]->hw_ops.process_cmd(
+				g_ife_hw_mgr.csid_devices[i]->hw_priv,
+				CAM_IFE_CSID_SET_CSID_DEBUG,
+				&g_ife_hw_mgr.debug_cfg.csid_debug,
+				sizeof(g_ife_hw_mgr.debug_cfg.csid_debug));
+	}
+
+	camif_debug = g_ife_hw_mgr.debug_cfg.camif_debug;
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			rsrc_node = hw_mgr_res->hw_res[i];
+			if (rsrc_node->process_cmd && (rsrc_node->res_id ==
+				CAM_ISP_HW_VFE_IN_CAMIF)) {
+				rc = hw_mgr_res->hw_res[i]->process_cmd(
+					hw_mgr_res->hw_res[i],
+					CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+					&camif_debug,
+					sizeof(camif_debug));
+			}
+		}
+	}
+
+	rc = cam_ife_hw_mgr_init_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Init failed");
+		goto err;
+	}
+
+start_only:
+
+	mutex_lock(&g_ife_hw_mgr.ctx_mutex);
+	if (!atomic_fetch_inc(&g_ife_hw_mgr.active_ctx_cnt)) {
+		rc = cam_ife_notify_safe_lut_scm(CAM_IFE_SAFE_ENABLE);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"SAFE SCM call failed:Check TZ/HYP dependency");
+			rc = -1;
+		}
+	}
+	mutex_unlock(&g_ife_hw_mgr.ctx_mutex);
+
+	CAM_DBG(CAM_ISP, "start cdm interface");
+	rc = cam_cdm_stream_on(ctx->cdm_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Can not start cdm (%d)",
+			 ctx->cdm_handle);
+		goto err;
+	}
+
+	if (!start_isp->start_only) {
+		/* Apply initial configuration */
+		CAM_DBG(CAM_ISP, "Config HW");
+		rc = cam_ife_mgr_config_hw(hw_mgr_priv,
+			&start_isp->hw_config);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Config HW failed");
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START IFE OUT ... in ctx id:%d",
+		ctx->ctx_index);
+	/* start the IFE out devices */
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		rc = cam_ife_hw_mgr_start_hw_res(
+			&ctx->res_list_ife_out[i], ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start IFE OUT (%d)",
+				 i);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START IFE SRC ... in ctx id:%d",
+		ctx->ctx_index);
+	/* Start the IFE mux in devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start IFE MUX (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START CSID HW ... in ctx id:%d",
+		ctx->ctx_index);
+	/* Start the IFE CSID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start IFE CSID (%d)",
+				 hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "START CID SRC ... in ctx id:%d",
+		ctx->ctx_index);
+	/* Start the IFE CID HW devices */
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_cid, list) {
+		rc = cam_ife_hw_mgr_start_hw_res(hw_mgr_res, ctx);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Can not start IFE CID (%d)",
+				hw_mgr_res->res_id);
+			goto err;
+		}
+	}
+
+	ctx->init_done = true;
+	/* Start IFE root node: do nothing */
+	CAM_DBG(CAM_ISP, "Start success for ctx id:%d", ctx->ctx_index);
+
+	return 0;
+
+err:
+	stop_isp.stop_only = false;
+	stop_isp.hw_stop_cmd = CAM_ISP_HW_STOP_IMMEDIATELY;
+	stop_args.ctxt_to_hw_map = start_isp->hw_config.ctxt_to_hw_map;
+	stop_args.args = (void *)(&stop_isp);
+
+	cam_ife_mgr_stop_hw(hw_mgr_priv, &stop_args);
+	CAM_DBG(CAM_ISP, "Exit...(rc=%d)", rc);
+	return rc;
+}
+
+static int cam_ife_mgr_read(void *hw_mgr_priv, void *read_args)
+{
+	return -EPERM;
+}
+
+static int cam_ife_mgr_write(void *hw_mgr_priv, void *write_args)
+{
+	return -EPERM;
+}
+
+static int cam_ife_mgr_release_hw(void *hw_mgr_priv,
+					void *release_hw_args)
+{
+	int                               rc           = 0;
+	struct cam_hw_release_args       *release_args = release_hw_args;
+	struct cam_ife_hw_mgr            *hw_mgr       = hw_mgr_priv;
+	struct cam_ife_hw_mgr_ctx        *ctx;
+	uint32_t                          i;
+
+	if (!hw_mgr_priv || !release_hw_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	ctx = (struct cam_ife_hw_mgr_ctx *)release_args->ctxt_to_hw_map;
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Invalid context is used");
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter...ctx id:%d",
+		ctx->ctx_index);
+
+	if (ctx->init_done)
+		cam_ife_hw_mgr_deinit_hw(ctx);
+
+	/* stop hw should have been called before this point */
+	cam_ife_hw_mgr_release_hw_for_ctx(ctx);
+
+	/* reset base info */
+	ctx->num_base = 0;
+	memset(ctx->base, 0, sizeof(ctx->base));
+
+	/* release cdm handle */
+	cam_cdm_release(ctx->cdm_handle);
+
+	/* clean context */
+	list_del_init(&ctx->list);
+	ctx->ctx_in_use = 0;
+	ctx->is_rdi_only_context = 0;
+	ctx->cdm_handle = 0;
+	ctx->cdm_ops = NULL;
+	atomic_set(&ctx->overflow_pending, 0);
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		ctx->sof_cnt[i] = 0;
+		ctx->eof_cnt[i] = 0;
+		ctx->epoch_cnt[i] = 0;
+	}
+	CAM_DBG(CAM_ISP, "Exit...ctx id:%d",
+		ctx->ctx_index);
+	cam_ife_hw_mgr_put_ctx(&hw_mgr->free_ctx_list, &ctx);
+	return rc;
+}
+
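+/*
+ * Translate a UBWC config blob into per-port register writes in the KMD
+ * buffer and append a single HW update entry covering all of them.
+ */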
+static int cam_isp_blob_ubwc_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_ubwc_config                *ubwc_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_ubwc_plane_cfg_v1          *ubwc_plane_cfg;
+	struct cam_kmd_buf_info               *kmd_buf_info;
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	uint32_t                               res_id_out, i;
+	uint32_t                               total_used_bytes = 0;
+	uint32_t                               kmd_buf_remain_size;
+	uint32_t                              *cmd_buf_addr;
+	uint32_t                               bytes_used = 0;
+	int                                    num_ent, rc = 0;
+
+	ctx = prepare->ctxt_to_hw_map;
+	if (!ctx) {
+		CAM_ERR(CAM_ISP, "Invalid ctx");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((prepare->num_hw_update_entries + 1) >=
+		prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %d, max: %d",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (ubwc_config->api_version) {
+	case CAM_UBWC_CFG_VERSION_1:
+		CAM_DBG(CAM_ISP, "num_ports= %d", ubwc_config->num_ports);
+
+		kmd_buf_info = blob_info->kmd_buf_info;
+		for (i = 0; i < ubwc_config->num_ports; i++) {
+			ubwc_plane_cfg = &ubwc_config->ubwc_plane_cfg[i][0];
+			res_id_out = ubwc_plane_cfg->port_type & 0xFF;
+
+			CAM_DBG(CAM_ISP, "UBWC config idx %d, port_type=%d", i,
+				ubwc_plane_cfg->port_type);
+
+			if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
+				CAM_ERR(CAM_ISP, "Invalid port type:%x",
+					ubwc_plane_cfg->port_type);
+				rc = -EINVAL;
+				goto end;
+			}
+
+			if ((kmd_buf_info->used_bytes
+				+ total_used_bytes) < kmd_buf_info->size) {
+				kmd_buf_remain_size = kmd_buf_info->size -
+					(kmd_buf_info->used_bytes
+					+ total_used_bytes);
+			} else {
+				CAM_ERR(CAM_ISP,
+				"no free kmd memory for base=%d bytes_used=%u buf_size=%u",
+					blob_info->base_info->idx, bytes_used,
+					kmd_buf_info->size);
+				rc = -ENOMEM;
+				goto end;
+			}
+
+			cmd_buf_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes/4 +
+				total_used_bytes/4;
+			hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
+
+			if (!hw_mgr_res) {
+				CAM_ERR(CAM_ISP, "Invalid hw_mgr_res");
+				rc = -EINVAL;
+				goto end;
+			}
+
+			rc = cam_isp_add_cmd_buf_update(
+				hw_mgr_res, blob_type,
+				blob_type_hw_cmd_map[blob_type],
+				blob_info->base_info->idx,
+				(void *)cmd_buf_addr,
+				kmd_buf_remain_size,
+				(void *)ubwc_plane_cfg,
+				&bytes_used);
+			if (rc < 0) {
+				CAM_ERR(CAM_ISP,
+					"Failed cmd_update, base_idx=%d, bytes_used=%u, res_id_out=0x%x",
+					blob_info->base_info->idx,
+					bytes_used,
+					res_id_out);
+				goto end;
+			}
+
+			total_used_bytes += bytes_used;
+		}
+
+		if (total_used_bytes) {
+			/* Update the HW entries */
+			num_ent = prepare->num_hw_update_entries;
+			prepare->hw_update_entries[num_ent].handle =
+				kmd_buf_info->handle;
+			prepare->hw_update_entries[num_ent].len =
+				total_used_bytes;
+			prepare->hw_update_entries[num_ent].offset =
+				kmd_buf_info->offset;
+			num_ent++;
+
+			kmd_buf_info->used_bytes += total_used_bytes;
+			kmd_buf_info->offset     += total_used_bytes;
+			prepare->num_hw_update_entries = num_ent;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Invalid UBWC API Version %d",
+			ubwc_config->api_version);
+		rc = -EINVAL;
+		break;
+	}
+end:
+	return rc;
+}
+
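+/*
+ * Translate an HFR config blob into per-port register writes in the KMD
+ * buffer; like the UBWC update, it consumes at most one HW update entry.
+ */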
+static int cam_isp_blob_hfr_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_isp_resource_hfr_config    *hfr_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_isp_port_hfr_config        *port_hfr_config;
+	struct cam_kmd_buf_info               *kmd_buf_info;
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	uint32_t                               res_id_out, i;
+	uint32_t                               total_used_bytes = 0;
+	uint32_t                               kmd_buf_remain_size;
+	uint32_t                              *cmd_buf_addr;
+	uint32_t                               bytes_used = 0;
+	int                                    num_ent, rc = 0;
+
+	ctx = prepare->ctxt_to_hw_map;
+	CAM_DBG(CAM_ISP, "num_ports= %d",
+		hfr_config->num_ports);
+
+	/* At most one HW entry is required for the HFR config update */
+	if (prepare->num_hw_update_entries + 1 >=
+			prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %d, max: %d",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	kmd_buf_info = blob_info->kmd_buf_info;
+	for (i = 0; i < hfr_config->num_ports; i++) {
+		port_hfr_config = &hfr_config->port_hfr_config[i];
+		res_id_out = port_hfr_config->resource_type & 0xFF;
+
+		CAM_DBG(CAM_ISP, "hfr config idx %d, type=%d", i,
+			res_id_out);
+
+		if (res_id_out >= CAM_IFE_HW_OUT_RES_MAX) {
+			CAM_ERR(CAM_ISP, "invalid out restype:%x",
+				port_hfr_config->resource_type);
+			return -EINVAL;
+		}
+
+		if ((kmd_buf_info->used_bytes
+			+ total_used_bytes) < kmd_buf_info->size) {
+			kmd_buf_remain_size = kmd_buf_info->size -
+			(kmd_buf_info->used_bytes +
+			total_used_bytes);
+		} else {
+			CAM_ERR(CAM_ISP,
+			"no free kmd memory for base %d",
+			blob_info->base_info->idx);
+			rc = -ENOMEM;
+			return rc;
+		}
+
+		cmd_buf_addr = kmd_buf_info->cpu_addr +
+			kmd_buf_info->used_bytes/4 +
+			total_used_bytes/4;
+		hw_mgr_res = &ctx->res_list_ife_out[res_id_out];
+
+		rc = cam_isp_add_cmd_buf_update(
+			hw_mgr_res, blob_type,
+			blob_type_hw_cmd_map[blob_type],
+			blob_info->base_info->idx,
+			(void *)cmd_buf_addr,
+			kmd_buf_remain_size,
+			(void *)port_hfr_config,
+			&bytes_used);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP,
+				"Failed cmd_update, base_idx=%d, rc=%d",
+				blob_info->base_info->idx, rc);
+			return rc;
+		}
+
+		total_used_bytes += bytes_used;
+	}
+
+	if (total_used_bytes) {
+		/* Update the HW entries */
+		num_ent = prepare->num_hw_update_entries;
+		prepare->hw_update_entries[num_ent].handle =
+			kmd_buf_info->handle;
+		prepare->hw_update_entries[num_ent].len = total_used_bytes;
+		prepare->hw_update_entries[num_ent].offset =
+			kmd_buf_info->offset;
+		num_ent++;
+
+		kmd_buf_info->used_bytes += total_used_bytes;
+		kmd_buf_info->offset     += total_used_bytes;
+		prepare->num_hw_update_entries = num_ent;
+	}
+
+	return rc;
+}
+
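+/* Forward the requested CSID clock rate to every acquired CSID path */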
+static int cam_isp_blob_csid_clock_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_isp_csid_clock_config      *clock_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_ife_csid_clock_update_args  csid_clock_upd_args;
+	uint64_t                               clk_rate = 0;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+
+	ctx = prepare->ctxt_to_hw_map;
+
+	CAM_DBG(CAM_ISP,
+		"csid clk=%llu", clock_config->csid_clock);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			clk_rate = 0;
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+			clk_rate = clock_config->csid_clock;
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				csid_clock_upd_args.clk_rate = clk_rate;
+				CAM_DBG(CAM_ISP, "i=%d clk=%llu",
+					i, csid_clock_upd_args.clk_rate);
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					blob_type_hw_cmd_map[blob_type],
+					&csid_clock_upd_args,
+					sizeof(
+					struct cam_ife_csid_clock_update_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "Clock Update failed");
+			} else
+				CAM_ERR(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	return rc;
+}
+
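+/*
+ * Apply the clock config blob to the IFE inputs: left/right pixel rates
+ * for CAMIF and CAMIF-lite, and the highest requested RDI rate for the
+ * RDI paths.
+ */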
+static int cam_isp_blob_clock_update(
+	uint32_t                               blob_type,
+	struct cam_isp_generic_blob_info      *blob_info,
+	struct cam_isp_clock_config           *clock_config,
+	struct cam_hw_prepare_update_args     *prepare)
+{
+	struct cam_ife_hw_mgr_ctx             *ctx = NULL;
+	struct cam_ife_hw_mgr_res             *hw_mgr_res;
+	struct cam_hw_intf                    *hw_intf;
+	struct cam_vfe_clock_update_args       clock_upd_args;
+	uint64_t                               clk_rate = 0;
+	int                                    rc = -EINVAL;
+	uint32_t                               i;
+	uint32_t                               j;
+	bool                                   camif_l_clk_updated = false;
+	bool                                   camif_r_clk_updated = false;
+
+	ctx = prepare->ctxt_to_hw_map;
+
+	CAM_DBG(CAM_PERF,
+		"usage=%u left_clk=%llu right_clk=%llu",
+		clock_config->usage_type,
+		clock_config->left_pix_hz,
+		clock_config->right_pix_hz);
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			clk_rate = 0;
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			if (hw_mgr_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+				if (i == CAM_ISP_HW_SPLIT_LEFT) {
+					if (camif_l_clk_updated)
+						continue;
+
+					clk_rate =
+						clock_config->left_pix_hz;
+
+					camif_l_clk_updated = true;
+				} else {
+					if (camif_r_clk_updated)
+						continue;
+
+					clk_rate =
+						clock_config->right_pix_hz;
+
+					camif_r_clk_updated = true;
+				}
+			} else if (hw_mgr_res->res_id ==
+				CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+				if (i == CAM_ISP_HW_SPLIT_LEFT) {
+					if (camif_l_clk_updated)
+						continue;
+
+					clk_rate =
+						clock_config->left_pix_hz;
+
+					camif_l_clk_updated = true;
+				} else {
+					if (camif_r_clk_updated)
+						continue;
+
+					clk_rate =
+						clock_config->right_pix_hz;
+
+					camif_r_clk_updated = true;
+				}
+			} else if ((hw_mgr_res->res_id >=
+				CAM_ISP_HW_VFE_IN_RDI0) && (hw_mgr_res->res_id
+				<= CAM_ISP_HW_VFE_IN_RDI3)) {
+				for (j = 0; j < clock_config->num_rdi; j++)
+					clk_rate = max(clock_config->rdi_hz[j],
+						clk_rate);
+			} else if (hw_mgr_res->hw_res[i]) {
+				CAM_ERR(CAM_ISP, "Invalid res_id %u",
+					hw_mgr_res->res_id);
+				rc = -EINVAL;
+				return rc;
+			}
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf && hw_intf->hw_ops.process_cmd) {
+				clock_upd_args.node_res =
+					hw_mgr_res->hw_res[i];
+				CAM_DBG(CAM_ISP,
+					"res_id=%u i=%d clk=%llu",
+					hw_mgr_res->res_id, i, clk_rate);
+
+				clock_upd_args.clk_rate = clk_rate;
+
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_ISP_HW_CMD_CLOCK_UPDATE,
+					&clock_upd_args,
+					sizeof(
+					struct cam_vfe_clock_update_args));
+				if (rc)
+					CAM_ERR(CAM_ISP, "Clock Update failed");
+			} else
+				CAM_WARN(CAM_ISP, "NULL hw_intf!");
+		}
+	}
+
+	return rc;
+}
+
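+/*
+ * Dispatch a generic packet blob to its handler based on blob_type; BW
+ * configs are only cached here and applied later in config_hw.
+ */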
+static int cam_isp_packet_generic_blob_handler(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data)
+{
+	int rc = 0;
+	struct cam_isp_generic_blob_info  *blob_info = user_data;
+	struct cam_hw_prepare_update_args *prepare = NULL;
+
+	if (!blob_data || (blob_size == 0) || !blob_info) {
+		CAM_ERR(CAM_ISP, "Invalid info blob %pK %d prepare %pK",
+			blob_data, blob_size, prepare);
+		return -EINVAL;
+	}
+
+	if (blob_type >= CAM_ISP_GENERIC_BLOB_TYPE_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid Blob Type %d Max %d", blob_type,
+			CAM_ISP_GENERIC_BLOB_TYPE_MAX);
+		return -EINVAL;
+	}
+
+	prepare = blob_info->prepare;
+	if (!prepare) {
+		CAM_ERR(CAM_ISP, "Failed. prepare is NULL, blob_type %d",
+			blob_type);
+		return -EINVAL;
+	}
+
+	switch (blob_type) {
+	case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
+		struct cam_isp_resource_hfr_config    *hfr_config =
+			(struct cam_isp_resource_hfr_config *)blob_data;
+
+		rc = cam_isp_blob_hfr_update(blob_type, blob_info,
+			hfr_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "HFR Update Failed");
+	}
+		break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
+		struct cam_isp_clock_config    *clock_config =
+			(struct cam_isp_clock_config *)blob_data;
+
+		rc = cam_isp_blob_clock_update(blob_type, blob_info,
+			clock_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Clock Update Failed");
+	}
+		break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
+		struct cam_isp_bw_config    *bw_config =
+			(struct cam_isp_bw_config *)blob_data;
+		struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
+
+		if (!prepare || !prepare->priv ||
+			(bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
+			CAM_ERR(CAM_ISP, "Invalid inputs");
+			rc = -EINVAL;
+			break;
+		}
+
+		prepare_hw_data = (struct cam_isp_prepare_hw_update_data  *)
+			prepare->priv;
+
+		memcpy(&prepare_hw_data->bw_config[bw_config->usage_type],
+			bw_config, sizeof(prepare_hw_data->bw_config[0]));
+		prepare_hw_data->bw_config_valid[bw_config->usage_type] = true;
+
+	}
+		break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: {
+		struct cam_ubwc_config *ubwc_config =
+			(struct cam_ubwc_config *)blob_data;
+
+		rc = cam_isp_blob_ubwc_update(blob_type, blob_info,
+			ubwc_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "UBWC Update Failed rc: %d", rc);
+	}
+		break;
+	case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: {
+		struct cam_isp_csid_clock_config    *clock_config =
+			(struct cam_isp_csid_clock_config *)blob_data;
+
+		rc = cam_isp_blob_csid_clock_update(blob_type, blob_info,
+			clock_config, prepare);
+		if (rc)
+			CAM_ERR(CAM_ISP, "Clock Update Failed");
+	}
+		break;
+	default:
+		CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
+		break;
+	}
+
+	return rc;
+}
+
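+/*
+ * Build the per-request HW update list: validate and patch the packet,
+ * then add change-base, command-buffer, IO-buffer and (for update
+ * packets) reg-update entries for every base.
+ */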
+static int cam_ife_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc = 0;
+	struct cam_hw_prepare_update_args *prepare =
+		(struct cam_hw_prepare_update_args *) prepare_hw_update_args;
+	struct cam_ife_hw_mgr_ctx               *ctx;
+	struct cam_ife_hw_mgr                   *hw_mgr;
+	struct cam_kmd_buf_info                  kmd_buf;
+	uint32_t                                 i;
+	bool                                     fill_fence = true;
+	struct cam_isp_prepare_hw_update_data   *prepare_hw_data;
+
+	if (!hw_mgr_priv || !prepare_hw_update_args) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, "Enter for req_id %lld",
+		prepare->packet->header.request_id);
+
+	prepare_hw_data = (struct cam_isp_prepare_hw_update_data  *)
+		prepare->priv;
+
+	ctx = (struct cam_ife_hw_mgr_ctx *) prepare->ctxt_to_hw_map;
+	hw_mgr = (struct cam_ife_hw_mgr *)hw_mgr_priv;
+
+	rc = cam_packet_util_validate_packet(prepare->packet);
+	if (rc)
+		return rc;
+
+	/* Pre-parse the packet */
+	rc = cam_packet_util_get_kmd_buffer(prepare->packet, &kmd_buf);
+	if (rc)
+		return rc;
+
+	rc = cam_packet_util_process_patches(prepare->packet,
+		hw_mgr->mgr_common.cmd_iommu_hdl,
+		hw_mgr->mgr_common.cmd_iommu_hdl_secure);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Patch ISP packet failed.");
+		return rc;
+	}
+
+	prepare->num_hw_update_entries = 0;
+	prepare->num_in_map_entries = 0;
+	prepare->num_out_map_entries = 0;
+
+	memset(&prepare_hw_data->bw_config[0], 0x0,
+		sizeof(prepare_hw_data->bw_config[0]) *
+		CAM_IFE_HW_NUM_MAX);
+	memset(&prepare_hw_data->bw_config_valid[0], 0x0,
+		sizeof(prepare_hw_data->bw_config_valid[0]) *
+		CAM_IFE_HW_NUM_MAX);
+
+	for (i = 0; i < ctx->num_base; i++) {
+		CAM_DBG(CAM_ISP, "process cmd buffer for device %d", i);
+
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in change base i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+
+		/* get command buffers */
+		if (ctx->base[i].split_id != CAM_ISP_HW_SPLIT_MAX) {
+			rc = cam_isp_add_command_buffers(prepare, &kmd_buf,
+				&ctx->base[i],
+				cam_isp_packet_generic_blob_handler,
+				ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in add cmdbuf, i=%d, split_id=%d, rc=%d",
+					i, ctx->base[i].split_id, rc);
+				goto end;
+			}
+		}
+
+		/* get IO buffers */
+		rc = cam_isp_add_io_buffers(hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
+			prepare, ctx->base[i].idx,
+			&kmd_buf, ctx->res_list_ife_out,
+			CAM_IFE_HW_OUT_RES_MAX, fill_fence);
+
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in io buffers, i=%d, rc=%d",
+				i, rc);
+			goto end;
+		}
+
+		/* fence map table entries need to be filled only once */
+		if (fill_fence)
+			fill_fence = false;
+	}
+
+	/*
+	 * Reg update will be done later for the initial configure.
+	 * Add one to the op_code and take only the lower bits to get
+	 * the type of operation, since the UMD definition of op_code
+	 * differs from the KMD one.
+	 */
+	if (((prepare->packet->header.op_code + 1) & 0xF) ==
+		CAM_ISP_PACKET_INIT_DEV) {
+		prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_INIT_DEV;
+		goto end;
+	}
+
+	prepare_hw_data->packet_opcode_type = CAM_ISP_PACKET_UPDATE_DEV;
+
+	/* add reg update commands */
+	for (i = 0; i < ctx->num_base; i++) {
+		/* Add change base */
+		rc = cam_isp_add_change_base(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Failed in change base adding reg_update cmd i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+
+		/* Add reg update */
+		rc = cam_isp_add_reg_update(prepare, &ctx->res_list_ife_src,
+			ctx->base[i].idx, &kmd_buf);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"Add Reg_update cmd Failed i=%d, idx=%d, rc=%d",
+				i, ctx->base[i].idx, rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int cam_ife_mgr_resume_hw(struct cam_ife_hw_mgr_ctx *ctx)
+{
+	return cam_ife_mgr_bw_control(ctx, CAM_VFE_BW_CONTROL_INCLUDE);
+}
+
+static int cam_ife_mgr_sof_irq_debug(
+	struct cam_ife_hw_mgr_ctx *ctx,
+	uint32_t sof_irq_enable)
+{
+	int rc = 0;
+	uint32_t i = 0;
+	struct cam_ife_hw_mgr_res     *hw_mgr_res = NULL;
+	struct cam_hw_intf            *hw_intf = NULL;
+	struct cam_isp_resource_node  *rsrc_node = NULL;
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf->hw_ops.process_cmd) {
+				rc |= hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_IFE_CSID_SOF_IRQ_DEBUG,
+					&sof_irq_enable,
+					sizeof(sof_irq_enable));
+			}
+		}
+	}
+
+	list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			rsrc_node = hw_mgr_res->hw_res[i];
+			if (rsrc_node->process_cmd && (rsrc_node->res_id ==
+				CAM_ISP_HW_VFE_IN_CAMIF)) {
+				rc |= hw_mgr_res->hw_res[i]->process_cmd(
+					hw_mgr_res->hw_res[i],
+					CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
+					&sof_irq_enable,
+					sizeof(sof_irq_enable));
+			}
+		}
+	}
+
+	return rc;
+}
+
+static void cam_ife_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	uint64_t   iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_ISP,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_ISP, "port: %d f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_ISP, "get src buf address fail");
+				continue;
+			}
+			if (iova_addr >> 32) {
+				CAM_ERR(CAM_ISP, "Invalid mapped address");
+				rc = -EINVAL;
+				continue;
+			}
+
+			CAM_INFO(CAM_ISP,
+				"pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+		}
+	}
+}
+
+static int cam_ife_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_ife_hw_mgr  *hw_mgr = hw_mgr_priv;
+	struct cam_ife_hw_mgr_ctx *ctx = (struct cam_ife_hw_mgr_ctx *)
+		hw_cmd_args->ctxt_to_hw_map;
+	struct cam_isp_hw_cmd_args *isp_hw_cmd_args = NULL;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (!ctx || !ctx->ctx_in_use) {
+		CAM_ERR(CAM_ISP, "Fatal: Invalid context is used");
+		return -EPERM;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_INTERNAL:
+		if (!hw_cmd_args->u.internal_args) {
+			CAM_ERR(CAM_ISP, "Invalid cmd arguments");
+			return -EINVAL;
+		}
+
+		isp_hw_cmd_args = (struct cam_isp_hw_cmd_args *)
+			hw_cmd_args->u.internal_args;
+
+		switch (isp_hw_cmd_args->cmd_type) {
+		case CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT:
+			if (ctx->is_rdi_only_context)
+				isp_hw_cmd_args->u.is_rdi_only_context = 1;
+			else
+				isp_hw_cmd_args->u.is_rdi_only_context = 0;
+			break;
+		case CAM_ISP_HW_MGR_CMD_PAUSE_HW:
+			cam_ife_mgr_pause_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_RESUME_HW:
+			cam_ife_mgr_resume_hw(ctx);
+			break;
+		case CAM_ISP_HW_MGR_CMD_SOF_DEBUG:
+			cam_ife_mgr_sof_irq_debug(ctx,
+				isp_hw_cmd_args->u.sof_irq_enable);
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid ISP HW mgr command:0x%x",
+				isp_hw_cmd_args->cmd_type);
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_ife_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->mgr_common.img_iommu_hdl,
+			hw_mgr->mgr_common.img_iommu_hdl_secure,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Invalid cmd");
+	}
+
+	return rc;
+}
+
+static int cam_ife_mgr_cmd_get_sof_timestamp(
+	struct cam_ife_hw_mgr_ctx      *ife_ctx,
+	uint64_t                       *time_stamp,
+	uint64_t                       *boot_time_stamp)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+	struct cam_ife_hw_mgr_res            *hw_mgr_res;
+	struct cam_hw_intf                   *hw_intf;
+	struct cam_csid_get_time_stamp_args   csid_get_time;
+
+	list_for_each_entry(hw_mgr_res, &ife_ctx->res_list_ife_csid, list) {
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i] ||
+				(i == CAM_ISP_HW_SPLIT_RIGHT))
+				continue;
+			/*
+			 * Get the SOF time stamp from the left resource
+			 * only. The left resource is the master in the dual
+			 * VFE case, and in an RDI-only context the left
+			 * resource alone holds the RDI resource.
+			 */
+			hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+			if (hw_intf->hw_ops.process_cmd) {
+				csid_get_time.node_res =
+					hw_mgr_res->hw_res[i];
+				rc = hw_intf->hw_ops.process_cmd(
+					hw_intf->hw_priv,
+					CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+					&csid_get_time,
+					sizeof(
+					struct cam_csid_get_time_stamp_args));
+				if (!rc) {
+					*time_stamp =
+						csid_get_time.time_stamp_val;
+					*boot_time_stamp =
+						csid_get_time.boot_timestamp;
+				}
+				/*
+				 * In the single VFE case, get the time stamp
+				 * from the one available CSID HW in the
+				 * context; in the dual VFE case, the master
+				 * (left) time stamp is sufficient.
+				 */
+				goto end;
+			}
+		}
+	}
+end:
+	if (rc)
+		CAM_ERR(CAM_ISP, "Getting sof time stamp failed");
+
+	return rc;
+}
+
+static int cam_ife_mgr_process_recovery_cb(void *priv, void *data)
+{
+	int32_t rc = 0;
+	struct cam_hw_event_recovery_data   *recovery_data = data;
+	struct cam_hw_start_args             start_args;
+	struct cam_hw_stop_args              stop_args;
+	struct cam_ife_hw_mgr               *ife_hw_mgr = priv;
+	struct cam_ife_hw_mgr_res           *hw_mgr_res;
+	uint32_t                             i = 0;
+
+	uint32_t error_type = recovery_data->error_type;
+	struct cam_ife_hw_mgr_ctx        *ctx = NULL;
+
+	/* Here recovery is performed */
+	CAM_DBG(CAM_ISP, "ErrorType = %d", error_type);
+
+	switch (error_type) {
+	case CAM_ISP_HW_ERROR_OVERFLOW:
+	case CAM_ISP_HW_ERROR_BUSIF_OVERFLOW:
+		if (!recovery_data->affected_ctx[0]) {
+			CAM_ERR(CAM_ISP,
+				"No context is affected but recovery called");
+			kfree(recovery_data);
+			return 0;
+		}
+		/* stop resources here */
+		CAM_DBG(CAM_ISP, "STOP: Number of affected context: %d",
+			recovery_data->no_of_context);
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			stop_args.ctxt_to_hw_map =
+				recovery_data->affected_ctx[i];
+			rc = cam_ife_mgr_stop_hw_in_overflow(&stop_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "CTX stop failed(%d)", rc);
+				return rc;
+			}
+		}
+
+		CAM_DBG(CAM_ISP, "RESET: CSID PATH");
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			ctx = recovery_data->affected_ctx[i];
+			list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_csid,
+				list) {
+				rc = cam_ife_hw_mgr_reset_csid_res(hw_mgr_res);
+				if (rc) {
+					CAM_ERR(CAM_ISP, "Failed RESET (%d)",
+						hw_mgr_res->res_id);
+					return rc;
+				}
+			}
+		}
+
+		CAM_DBG(CAM_ISP, "RESET: Calling VFE reset");
+
+		for (i = 0; i < CAM_VFE_HW_NUM_MAX; i++) {
+			if (recovery_data->affected_core[i])
+				cam_ife_mgr_reset_vfe_hw(ife_hw_mgr, i);
+		}
+
+		CAM_DBG(CAM_ISP, "START: Number of affected context: %d",
+			recovery_data->no_of_context);
+
+		for (i = 0; i < recovery_data->no_of_context; i++) {
+			ctx =  recovery_data->affected_ctx[i];
+			start_args.ctxt_to_hw_map = ctx;
+
+			atomic_set(&ctx->overflow_pending, 0);
+
+			rc = cam_ife_mgr_restart_hw(&start_args);
+			if (rc) {
+				CAM_ERR(CAM_ISP, "CTX start failed(%d)", rc);
+				return rc;
+			}
+			CAM_DBG(CAM_ISP, "Started resources rc (%d)", rc);
+		}
+		CAM_DBG(CAM_ISP, "Recovery Done rc (%d)", rc);
+
+		break;
+
+	case CAM_ISP_HW_ERROR_P2I_ERROR:
+		break;
+
+	case CAM_ISP_HW_ERROR_VIOLATION:
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "Invalid Error");
+	}
+	CAM_DBG(CAM_ISP, "Exit: ErrorType = %d", error_type);
+
+	kfree(recovery_data);
+	return rc;
+}
+
+static int cam_ife_hw_mgr_do_error_recovery(
+		struct cam_hw_event_recovery_data  *ife_mgr_recovery_data)
+{
+	int32_t rc = 0;
+	struct crm_workq_task        *task = NULL;
+	struct cam_hw_event_recovery_data  *recovery_data = NULL;
+
+	recovery_data = kzalloc(sizeof(struct cam_hw_event_recovery_data),
+		GFP_ATOMIC);
+	if (!recovery_data)
+		return -ENOMEM;
+
+	memcpy(recovery_data, ife_mgr_recovery_data,
+			sizeof(struct cam_hw_event_recovery_data));
+
+	CAM_DBG(CAM_ISP, "Enter: error_type (%d)", recovery_data->error_type);
+
+	task = cam_req_mgr_workq_get_task(g_ife_hw_mgr.workq);
+	if (!task) {
+		CAM_ERR(CAM_ISP, "No empty task frame");
+		kfree(recovery_data);
+		return -ENOMEM;
+	}
+
+	task->process_cb = &cam_ife_mgr_process_recovery_cb;
+	task->payload = recovery_data;
+	rc = cam_req_mgr_workq_enqueue_task(task,
+		recovery_data->affected_ctx[0]->hw_mgr,
+		CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+/*
+ * This function checks whether any valid entry in affected_core[] is
+ * associated with this context. If yes:
+ *  a. it marks the other cores associated with this context in
+ *     affected_core[];
+ *  b. it returns 1 if the ctx is affected, 0 otherwise.
+ */
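+/*
+ * Illustration (hypothetical core indices): for a dual-VFE ctx on cores
+ * {0, 1} with affected_core[0] set on entry, core 1 is also marked in
+ * affected_core[] and 1 is returned; a ctx on cores {2, 3} returns 0 and
+ * leaves affected_core[] untouched.
+ */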
+static int cam_ife_hw_mgr_is_ctx_affected(
+	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx,
+	uint32_t *affected_core, uint32_t size)
+{
+	int32_t rc = 0;
+	uint32_t i = 0, j = 0;
+	uint32_t max_idx =  ife_hwr_mgr_ctx->num_base;
+	uint32_t ctx_affected_core_idx[CAM_IFE_HW_NUM_MAX] = {0};
+
+	CAM_DBG(CAM_ISP, "max_idx = %d", max_idx);
+
+	if ((max_idx >= CAM_IFE_HW_NUM_MAX) ||
+		(size > CAM_IFE_HW_NUM_MAX)) {
+		CAM_ERR(CAM_ISP, "invalid parameter: max_idx = %d, size = %d",
+			max_idx, size);
+		return rc;
+	}
+
+	for (i = 0; i < max_idx; i++) {
+		if (affected_core[ife_hwr_mgr_ctx->base[i].idx])
+			rc = 1;
+		else {
+			ctx_affected_core_idx[j] = ife_hwr_mgr_ctx->base[i].idx;
+			CAM_DBG(CAM_ISP, "Add affected IFE %d for recovery",
+				ctx_affected_core_idx[j]);
+			j = j + 1;
+		}
+	}
+
+	if (rc == 1) {
+		while (j) {
+			affected_core[ctx_affected_core_idx[j-1]] = 1;
+			j = j - 1;
+		}
+	}
+
+	return rc;
+}
+
+/*
+ * For any dual VFE context, if the non-affected VFE is also serving
+ * another context, that context must also be notified of the fatal error.
+ * So loop through each context and:
+ *   a. match the core_idx;
+ *   b. notify the ctx of the fatal error.
+ */
+static int  cam_ife_hw_mgr_find_affected_ctx(
+	struct cam_ife_hw_mgr_ctx             *curr_ife_hwr_mgr_ctx,
+	struct cam_isp_hw_error_event_data    *error_event_data,
+	uint32_t                               curr_core_idx,
+	struct cam_hw_event_recovery_data     *recovery_data)
+{
+	uint32_t affected_core[CAM_IFE_HW_NUM_MAX] = {0};
+	struct cam_ife_hw_mgr_ctx   *ife_hwr_mgr_ctx = NULL;
+	cam_hw_event_cb_func         notify_err_cb;
+	struct cam_ife_hw_mgr       *ife_hwr_mgr = NULL;
+	enum cam_isp_hw_event_type   event_type = CAM_ISP_HW_EVENT_ERROR;
+	uint32_t i = 0;
+
+	if (!recovery_data) {
+		CAM_ERR(CAM_ISP, "recovery_data parameter is NULL");
+		return -EINVAL;
+	}
+
+	recovery_data->no_of_context = 0;
+	affected_core[curr_core_idx] = 1;
+	ife_hwr_mgr = curr_ife_hwr_mgr_ctx->hw_mgr;
+
+	list_for_each_entry(ife_hwr_mgr_ctx,
+		&ife_hwr_mgr->used_ctx_list, list) {
+		/*
+		 * Check if current core_idx matches the HW associated
+		 * with this context
+		 */
+		if (!cam_ife_hw_mgr_is_ctx_affected(ife_hwr_mgr_ctx,
+			affected_core, CAM_IFE_HW_NUM_MAX))
+			continue;
+
+		atomic_set(&ife_hwr_mgr_ctx->overflow_pending, 1);
+		notify_err_cb = ife_hwr_mgr_ctx->common.event_cb[event_type];
+
+		/* Add affected_context in list of recovery data */
+		CAM_DBG(CAM_ISP, "Add affected ctx %d to list",
+			ife_hwr_mgr_ctx->ctx_index);
+		if (recovery_data->no_of_context < CAM_CTX_MAX)
+			recovery_data->affected_ctx[
+				recovery_data->no_of_context++] =
+				ife_hwr_mgr_ctx;
+
+		/*
+		 * In the call back function corresponding ISP context
+		 * will update CRM about fatal Error
+		 */
+		notify_err_cb(ife_hwr_mgr_ctx->common.cb_priv,
+			CAM_ISP_HW_EVENT_ERROR, error_event_data);
+	}
+
+	/* fill the affected_core in recovery data */
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		recovery_data->affected_core[i] = affected_core[i];
+		CAM_DBG(CAM_ISP, "Vfe core %d is affected (%d)",
+			 i, recovery_data->affected_core[i]);
+	}
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_get_err_type(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	struct cam_isp_resource_node         *hw_res_l = NULL;
+	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
+	uint32_t  status = 0;
+	uint32_t  core_idx;
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "No payload");
+		return IRQ_HANDLED;
+	}
+
+	core_idx = evt_payload->core_index;
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_ERROR;
+
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		if ((isp_ife_camif_res->res_type ==
+			CAM_IFE_HW_MGR_RES_UNINIT) ||
+			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_LEFT];
+		hw_res_r = isp_ife_camif_res->hw_res[CAM_ISP_HW_SPLIT_RIGHT];
+
+		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+			isp_ife_camif_res->is_dual_vfe);
+
+		/* ERROR check for Left VFE */
+		if (!hw_res_l) {
+			CAM_DBG(CAM_ISP, "VFE(L) Device is NULL");
+			break;
+		}
+
+		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+			hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+		}
+
+		if (status)
+			break;
+
+		/* ERROR check for Right VFE */
+		if (!hw_res_r) {
+			CAM_DBG(CAM_ISP, "VFE(R) Device is NULL");
+			continue;
+		}
+		CAM_DBG(CAM_ISP, "core id= %d, HW id %d", core_idx,
+			hw_res_r->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_r->hw_intf->hw_idx) {
+			status = hw_res_r->bottom_half_handler(
+				hw_res_r, evt_payload);
+		}
+
+		if (status)
+			break;
+	}
+	CAM_DBG(CAM_ISP, "Exit (status = %d)!", status);
+	return status;
+}
+
+static int  cam_ife_hw_mgr_handle_camif_error(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t  error_status;
+	uint32_t core_idx;
+	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload      *evt_payload;
+	struct cam_isp_hw_error_event_data       error_event_data = {0};
+	struct cam_hw_event_recovery_data        recovery_data = {0};
+	int rc = 0;
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	core_idx = evt_payload->core_index;
+
+	error_status = cam_ife_hw_mgr_get_err_type(ife_hwr_mgr_ctx,
+		evt_payload);
+
+	if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+		return error_status;
+
+	switch (error_status) {
+	case CAM_ISP_HW_ERROR_OVERFLOW:
+	case CAM_ISP_HW_ERROR_P2I_ERROR:
+	case CAM_ISP_HW_ERROR_VIOLATION:
+		CAM_ERR(CAM_ISP, "Enter: error_type (%d)", error_status);
+		rc = -EFAULT;
+
+		if (g_ife_hw_mgr.debug_cfg.enable_recovery)
+			error_event_data.recovery_enabled = true;
+
+		error_event_data.error_type =
+				CAM_ISP_HW_ERROR_OVERFLOW;
+
+		cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
+			&error_event_data,
+			core_idx,
+			&recovery_data);
+
+		if (!g_ife_hw_mgr.debug_cfg.enable_recovery) {
+			CAM_DBG(CAM_ISP, "recovery is not enabled");
+			break;
+		}
+
+		CAM_DBG(CAM_ISP, "IFE Mgr recovery is enabled");
+		/* Trigger for recovery */
+		recovery_data.error_type = CAM_ISP_HW_ERROR_OVERFLOW;
+		cam_ife_hw_mgr_do_error_recovery(&recovery_data);
+		break;
+	default:
+		CAM_DBG(CAM_ISP, "No error (%d)", error_status);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Dual VFE is valid only for the PIX processing path.
+ * This function assumes hw_res[0] is the master in the dual VFE case.
+ * The RDI path does not support dual VFE.
+ */
+static int cam_ife_hw_mgr_handle_reg_update(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	struct cam_isp_resource_node            *hw_res;
+	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload      *evt_payload;
+	struct cam_ife_hw_mgr_res               *ife_src_res = NULL;
+	cam_hw_event_cb_func                     ife_hwr_irq_rup_cb;
+	struct cam_isp_hw_reg_update_event_data  rup_event_data;
+	uint32_t  core_idx;
+	int32_t   rup_status = -EINVAL;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+
+	if (!handler_priv || !payload) {
+		CAM_ERR(CAM_ISP, "Invalid Parameter");
+		return -EPERM;
+	}
+
+	core_idx = evt_payload->core_index;
+	ife_hwr_irq_rup_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_REG_UPDATE];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_REG_UPDATE;
+	list_for_each_entry(ife_src_res,
+			&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		CAM_DBG(CAM_ISP, "resource id = %d, curr_core_idx = %d",
+			 ife_src_res->res_id, core_idx);
+		switch (ife_src_res->res_id) {
+		case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
+			break;
+		case CAM_ISP_HW_VFE_IN_CAMIF:
+			if (ife_src_res->is_dual_vfe)
+				/* Check for slave core RUP ACK */
+				hw_res = ife_src_res->hw_res[1];
+			else
+				hw_res = ife_src_res->hw_res[0];
+
+			if (!hw_res) {
+				CAM_ERR(CAM_ISP, "CAMIF device is NULL");
+				break;
+			}
+			CAM_DBG(CAM_ISP,
+				"current_core_id = %d , core_idx res = %d",
+				 core_idx, hw_res->hw_intf->hw_idx);
+
+			if (core_idx == hw_res->hw_intf->hw_idx) {
+				rup_status = hw_res->bottom_half_handler(
+					hw_res, evt_payload);
+			}
+
+			if (ife_src_res->is_dual_vfe) {
+				hw_res = ife_src_res->hw_res[0];
+				if (core_idx == hw_res->hw_intf->hw_idx) {
+					hw_res->bottom_half_handler(
+						hw_res, evt_payload);
+				}
+			}
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+
+			if (!rup_status) {
+				ife_hwr_irq_rup_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_REG_UPDATE,
+					&rup_event_data);
+			}
+			break;
+
+		case CAM_ISP_HW_VFE_IN_RDI0:
+		case CAM_ISP_HW_VFE_IN_RDI1:
+		case CAM_ISP_HW_VFE_IN_RDI2:
+		case CAM_ISP_HW_VFE_IN_RDI3:
+			hw_res = ife_src_res->hw_res[0];
+
+			if (!hw_res) {
+				CAM_ERR(CAM_ISP, "RDI Device is NULL");
+				break;
+			}
+
+			if (core_idx == hw_res->hw_intf->hw_idx)
+				rup_status = hw_res->bottom_half_handler(
+					hw_res, evt_payload);
+
+			if (!ife_hwr_mgr_ctx->is_rdi_only_context)
+				continue;
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			if (!rup_status) {
+				/* Send the Reg update hw event */
+				ife_hwr_irq_rup_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_REG_UPDATE,
+					&rup_event_data);
+			}
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid resource id (%d)",
+				ife_src_res->res_id);
+		}
+
+	}
+
+	if (!rup_status)
+		CAM_DBG(CAM_ISP, "Exit rup_status = %d", rup_status);
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_check_irq_for_dual_vfe(
+	struct cam_ife_hw_mgr_ctx   *ife_hw_mgr_ctx,
+	uint32_t                     core_idx0,
+	uint32_t                     core_idx1,
+	uint32_t                     hw_event_type)
+{
+	int32_t rc = -1;
+	uint32_t *event_cnt = NULL;
+
+	switch (hw_event_type) {
+	case CAM_ISP_HW_EVENT_SOF:
+		event_cnt = ife_hw_mgr_ctx->sof_cnt;
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		event_cnt = ife_hw_mgr_ctx->epoch_cnt;
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		event_cnt = ife_hw_mgr_ctx->eof_cnt;
+		break;
+	default:
+		return 0;
+	}
+
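+	/*
+	 * Illustration (hypothetical counts): event_cnt = {2, 2} means both
+	 * cores saw the event, so the counters are cleared and 0 is
+	 * returned; {3, 1} means one VFE is more than one event ahead and
+	 * an error is flagged; {1, 0} simply waits for the other core.
+	 */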
+	if (event_cnt[core_idx0] ==
+			event_cnt[core_idx1]) {
+
+		event_cnt[core_idx0] = 0;
+		event_cnt[core_idx1] = 0;
+
+		rc = 0;
+		return rc;
+	}
+
+	if ((event_cnt[core_idx0] &&
+		(event_cnt[core_idx0] - event_cnt[core_idx1] > 1)) ||
+		(event_cnt[core_idx1] &&
+		(event_cnt[core_idx1] - event_cnt[core_idx0] > 1))) {
+
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"One of the VFE could not generate hw event %d",
+			hw_event_type);
+		rc = -1;
+		return rc;
+	}
+
+	CAM_DBG(CAM_ISP, "Only one core_index has given hw event %d",
+			hw_event_type);
+
+	return rc;
+}
+
+static int cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t rc = -EINVAL;
+	struct cam_isp_resource_node         *hw_res_l;
+	struct cam_isp_resource_node         *hw_res_r;
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
+	cam_hw_event_cb_func                  ife_hwr_irq_epoch_cb;
+	struct cam_isp_hw_epoch_event_data    epoch_done_event_data;
+	uint32_t  core_idx;
+	int32_t   epoch_status = -EINVAL;
+	uint32_t  core_index0;
+	uint32_t  core_index1;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	ife_hwr_irq_epoch_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EPOCH];
+	core_idx = evt_payload->core_index;
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_EPOCH;
+
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+		if ((isp_ife_camif_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			|| (isp_ife_camif_res->res_id !=
+			CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[0];
+		hw_res_r = isp_ife_camif_res->hw_res[1];
+
+		switch (isp_ife_camif_res->is_dual_vfe) {
+		/* Handling Single VFE Scenario */
+		case 0:
+			/* EPOCH check for Left side VFE */
+			if (!hw_res_l) {
+				CAM_ERR(CAM_ISP, "Left Device is NULL");
+				break;
+			}
+
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				epoch_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+				if (atomic_read(
+					&ife_hwr_mgr_ctx->overflow_pending))
+					break;
+				if (!epoch_status)
+					ife_hwr_irq_epoch_cb(
+						ife_hwr_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_EPOCH,
+						&epoch_done_event_data);
+			}
+
+			break;
+
+		/* Handling Dual VFE Scenario */
+		case 1:
+			/* EPOCH check for Left side VFE (master) */
+
+			if ((!hw_res_l) || (!hw_res_r)) {
+				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
+				break;
+			}
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				epoch_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+
+				if (!epoch_status)
+					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			/* EPOCH check for Right side VFE */
+			if (core_idx == hw_res_r->hw_intf->hw_idx) {
+				epoch_status = hw_res_r->bottom_half_handler(
+					hw_res_r, evt_payload);
+
+				if (!epoch_status)
+					ife_hwr_mgr_ctx->epoch_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			core_index0 = hw_res_l->hw_intf->hw_idx;
+			core_index1 = hw_res_r->hw_intf->hw_idx;
+
+			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
+					ife_hwr_mgr_ctx,
+					core_index0,
+					core_index1,
+					evt_payload->evt_id);
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			if (!rc)
+				ife_hwr_irq_epoch_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_EPOCH,
+					&epoch_done_event_data);
+
+			break;
+
+		/* Error */
+		default:
+			CAM_ERR(CAM_ISP, "error with hw_res");
+
+		}
+	}
+
+	if (!epoch_status)
+		CAM_DBG(CAM_ISP, "Exit epoch_status = %d", epoch_status);
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_process_camif_sof(
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res,
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx,
+	struct cam_vfe_top_irq_evt_payload   *evt_payload)
+{
+	struct cam_isp_resource_node         *hw_res_l = NULL;
+	struct cam_isp_resource_node         *hw_res_r = NULL;
+	int32_t rc = -EINVAL;
+	uint32_t  core_idx;
+	uint32_t  sof_status = 0;
+	uint32_t  core_index0;
+	uint32_t  core_index1;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	core_idx = evt_payload->core_index;
+	hw_res_l = isp_ife_camif_res->hw_res[0];
+	hw_res_r = isp_ife_camif_res->hw_res[1];
+	CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+		isp_ife_camif_res->is_dual_vfe);
+
+	switch (isp_ife_camif_res->is_dual_vfe) {
+	/* Handling Single VFE Scenario */
+	case 0:
+		/* SOF check for Left side VFE */
+		if (!hw_res_l) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
+			core_idx, hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			sof_status = hw_res_l->bottom_half_handler(hw_res_l,
+				evt_payload);
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			if (!sof_status)
+				rc = 0;
+		}
+
+		break;
+
+	/* Handling Dual VFE Scenario */
+	case 1:
+		/* SOF check for Left side VFE */
+
+		if (!hw_res_l) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx= %d",
+				 core_idx,
+				hw_res_l->hw_intf->hw_idx);
+
+		if (core_idx == hw_res_l->hw_intf->hw_idx) {
+			sof_status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+			if (!sof_status)
+				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			else
+				break;
+		}
+
+		/* SOF check for Right side VFE */
+		if (!hw_res_r) {
+			CAM_ERR(CAM_ISP, "VFE Device is NULL");
+			break;
+		}
+		CAM_DBG(CAM_ISP, "curr_core_idx = %d, res hw idx = %d",
+				 core_idx,
+				hw_res_r->hw_intf->hw_idx);
+		if (core_idx == hw_res_r->hw_intf->hw_idx) {
+			sof_status = hw_res_r->bottom_half_handler(hw_res_r,
+				evt_payload);
+			if (!sof_status)
+				ife_hwr_mgr_ctx->sof_cnt[core_idx]++;
+			else
+				break;
+		}
+
+		core_index0 = hw_res_l->hw_intf->hw_idx;
+		core_index1 = hw_res_r->hw_intf->hw_idx;
+
+		if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+			break;
+
+		rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(ife_hwr_mgr_ctx,
+			core_index0, core_index1, evt_payload->evt_id);
+
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "error with hw_res");
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit (sof_status = %d)", sof_status);
+
+	return rc;
+}
+
+static int cam_ife_hw_mgr_handle_sof(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	struct cam_isp_resource_node         *hw_res = NULL;
+	struct cam_ife_hw_mgr_ctx            *ife_hw_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *ife_src_res = NULL;
+	cam_hw_event_cb_func                  ife_hw_irq_sof_cb;
+	struct cam_isp_hw_sof_event_data      sof_done_event_data;
+	uint32_t  sof_status = 0;
+	bool sof_sent = false;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hw_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "no payload");
+		return IRQ_HANDLED;
+	}
+	ife_hw_irq_sof_cb =
+		ife_hw_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_SOF];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_SOF;
+
+	list_for_each_entry(ife_src_res,
+		&ife_hw_mgr_ctx->res_list_ife_src, list) {
+
+		if (ife_src_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		switch (ife_src_res->res_id) {
+		case CAM_ISP_HW_VFE_IN_RDI0:
+		case CAM_ISP_HW_VFE_IN_RDI1:
+		case CAM_ISP_HW_VFE_IN_RDI2:
+		case CAM_ISP_HW_VFE_IN_RDI3:
+			hw_res = ife_src_res->hw_res[0];
+			sof_status = hw_res->bottom_half_handler(
+				hw_res, evt_payload);
+
+			/* check if it is rdi only context */
+			if (ife_hw_mgr_ctx->is_rdi_only_context) {
+				if (!sof_status && !sof_sent) {
+					cam_ife_mgr_cmd_get_sof_timestamp(
+						ife_hw_mgr_ctx,
+						&sof_done_event_data.timestamp,
+						&sof_done_event_data.boot_time);
+
+					ife_hw_irq_sof_cb(
+						ife_hw_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_SOF,
+						&sof_done_event_data);
+					CAM_DBG(CAM_ISP, "sof_status = %d",
+						sof_status);
+
+					sof_sent = true;
+				}
+
+			}
+			break;
+
+		case CAM_ISP_HW_VFE_IN_CAMIF:
+			sof_status = cam_ife_hw_mgr_process_camif_sof(
+				ife_src_res, ife_hw_mgr_ctx, evt_payload);
+			if (!sof_status && !sof_sent) {
+				cam_ife_mgr_cmd_get_sof_timestamp(
+					ife_hw_mgr_ctx,
+					&sof_done_event_data.timestamp,
+					&sof_done_event_data.boot_time);
+
+				ife_hw_irq_sof_cb(
+					ife_hw_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_SOF,
+					&sof_done_event_data);
+				CAM_DBG(CAM_ISP, "sof_status = %d",
+					sof_status);
+
+				sof_sent = true;
+			}
+			break;
+		case CAM_ISP_HW_VFE_IN_CAMIF_LITE:
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid resource id :%d",
+				ife_src_res->res_id);
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_handle_eof_for_camif_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+{
+	int32_t rc = -EINVAL;
+	struct cam_isp_resource_node         *hw_res_l = NULL;
+	struct cam_isp_resource_node         *hw_res_r = NULL;
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	struct cam_ife_hw_mgr_res            *isp_ife_camif_res = NULL;
+	cam_hw_event_cb_func                  ife_hwr_irq_eof_cb;
+	struct cam_isp_hw_eof_event_data      eof_done_event_data;
+	uint32_t  core_idx;
+	uint32_t  eof_status = 0;
+	uint32_t  core_index0;
+	uint32_t  core_index1;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = handler_priv;
+	evt_payload = payload;
+	if (!evt_payload) {
+		CAM_ERR(CAM_ISP, "no payload");
+		return IRQ_HANDLED;
+	}
+	core_idx = evt_payload->core_index;
+	ife_hwr_irq_eof_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_EOF];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_EOF;
+
+	list_for_each_entry(isp_ife_camif_res,
+		&ife_hwr_mgr_ctx->res_list_ife_src, list) {
+
+		if ((isp_ife_camif_res->res_type ==
+			CAM_IFE_HW_MGR_RES_UNINIT) ||
+			(isp_ife_camif_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF))
+			continue;
+
+		hw_res_l = isp_ife_camif_res->hw_res[0];
+		hw_res_r = isp_ife_camif_res->hw_res[1];
+
+		CAM_DBG(CAM_ISP, "is_dual_vfe ? = %d",
+				isp_ife_camif_res->is_dual_vfe);
+		switch (isp_ife_camif_res->is_dual_vfe) {
+		/* Handling Single VFE Scenario */
+		case 0:
+			/* EOF check for Left side VFE */
+			if (!hw_res_l) {
+				CAM_ERR(CAM_ISP, "VFE Device is NULL");
+				break;
+			}
+			CAM_DBG(CAM_ISP, "curr_core_idx = %d, core idx hw = %d",
+					core_idx, hw_res_l->hw_intf->hw_idx);
+
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				eof_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+				if (atomic_read(
+					&ife_hwr_mgr_ctx->overflow_pending))
+					break;
+				if (!eof_status)
+					ife_hwr_irq_eof_cb(
+						ife_hwr_mgr_ctx->common.cb_priv,
+						CAM_ISP_HW_EVENT_EOF,
+						&eof_done_event_data);
+			}
+
+			break;
+		/* Handling dual VFE Scenario */
+		case 1:
+			if ((!hw_res_l) || (!hw_res_r)) {
+				CAM_ERR(CAM_ISP, "Dual VFE Device is NULL");
+				break;
+			}
+			if (core_idx == hw_res_l->hw_intf->hw_idx) {
+				eof_status = hw_res_l->bottom_half_handler(
+					hw_res_l, evt_payload);
+
+				if (!eof_status)
+					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			/* EOF check for Right side VFE */
+			if (core_idx == hw_res_r->hw_intf->hw_idx) {
+				eof_status = hw_res_r->bottom_half_handler(
+					hw_res_r, evt_payload);
+
+				if (!eof_status)
+					ife_hwr_mgr_ctx->eof_cnt[core_idx]++;
+				else
+					break;
+			}
+
+			core_index0 = hw_res_l->hw_intf->hw_idx;
+			core_index1 = hw_res_r->hw_intf->hw_idx;
+
+			rc = cam_ife_hw_mgr_check_irq_for_dual_vfe(
+					ife_hwr_mgr_ctx,
+					core_index0,
+					core_index1,
+					evt_payload->evt_id);
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+
+			if (!rc)
+				ife_hwr_irq_eof_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_EOF,
+					&eof_done_event_data);
+
+			break;
+
+		default:
+			CAM_ERR(CAM_ISP, "error with hw_res");
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Exit (eof_status = %d)", eof_status);
+
+	return 0;
+}
+
+static int cam_ife_hw_mgr_handle_buf_done_for_hw_res(
+	void                              *handler_priv,
+	void                              *payload)
+
+{
+	int32_t                              buf_done_status = 0;
+	int32_t                              i;
+	int32_t                              rc = 0;
+	cam_hw_event_cb_func                 ife_hwr_irq_wm_done_cb;
+	struct cam_isp_resource_node        *hw_res_l = NULL;
+	struct cam_ife_hw_mgr_ctx           *ife_hwr_mgr_ctx = NULL;
+	struct cam_vfe_bus_irq_evt_payload  *evt_payload = payload;
+	struct cam_ife_hw_mgr_res           *isp_ife_out_res = NULL;
+	struct cam_hw_event_recovery_data    recovery_data;
+	struct cam_isp_hw_done_event_data    buf_done_event_data = {0};
+	struct cam_isp_hw_error_event_data   error_event_data = {0};
+	uint32_t  error_resc_handle[CAM_IFE_HW_OUT_RES_MAX];
+	uint32_t  num_of_error_handles = 0;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	ife_hwr_mgr_ctx = evt_payload->ctx;
+	ife_hwr_irq_wm_done_cb =
+		ife_hwr_mgr_ctx->common.event_cb[CAM_ISP_HW_EVENT_DONE];
+
+	evt_payload->evt_id = CAM_ISP_HW_EVENT_DONE;
+
+	for (i = 0; i < CAM_IFE_HW_OUT_RES_MAX; i++) {
+		isp_ife_out_res = &ife_hwr_mgr_ctx->res_list_ife_out[i];
+
+		if (isp_ife_out_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		hw_res_l = isp_ife_out_res->hw_res[0];
+
+		/*
+		 * DUAL VFE: index 0 is always the master. In case of a
+		 * composite error, if the error is not in the master, it
+		 * needs to be checked in the slave (for debugging purposes
+		 * only). For all other cases index zero is valid.
+		 */
+
+		if (hw_res_l && (evt_payload->core_index ==
+			hw_res_l->hw_intf->hw_idx))
+			buf_done_status = hw_res_l->bottom_half_handler(
+				hw_res_l, evt_payload);
+		else
+			continue;
+
+		switch (buf_done_status) {
+		case CAM_VFE_IRQ_STATUS_ERR_COMP:
+			/*
+			 * The write interface can pipeline up to 2 buffer
+			 * done strobes from each write client. If any client
+			 * triggers a third buffer done strobe before the
+			 * composite interrupt based on the first buffer done
+			 * is triggered, an error irq is set. This scenario
+			 * can only happen if a client is 3 frames ahead of
+			 * the other clients enabled in the same composite
+			 * mask.
+			 */
+		case CAM_VFE_IRQ_STATUS_COMP_OWRT:
+			/*
+			 * It is an indication that bandwidth is not sufficient
+			 * to generate composite done irq within the VBI time.
+			 */
+
+			error_resc_handle[num_of_error_handles++] =
+					isp_ife_out_res->res_id;
+
+			if (num_of_error_handles > 0) {
+				error_event_data.error_type =
+					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
+				goto err;
+			}
+
+			break;
+		case CAM_VFE_IRQ_STATUS_ERR:
+			break;
+		case CAM_VFE_IRQ_STATUS_SUCCESS:
+			buf_done_event_data.num_handles = 1;
+			buf_done_event_data.resource_handle[0] =
+				isp_ife_out_res->res_id;
+
+			if (atomic_read(&ife_hwr_mgr_ctx->overflow_pending))
+				break;
+			/* Report for Successful buf_done event if any */
+			if (buf_done_event_data.num_handles > 0 &&
+				ife_hwr_irq_wm_done_cb) {
+				CAM_DBG(CAM_ISP, "notify isp context");
+				ife_hwr_irq_wm_done_cb(
+					ife_hwr_mgr_ctx->common.cb_priv,
+					CAM_ISP_HW_EVENT_DONE,
+					&buf_done_event_data);
+			}
+
+			break;
+		default:
+			/* Do NOTHING */
+			error_resc_handle[num_of_error_handles++] =
+				isp_ife_out_res->res_id;
+			if (num_of_error_handles > 0) {
+				error_event_data.error_type =
+					CAM_ISP_HW_ERROR_BUSIF_OVERFLOW;
+				goto err;
+			}
+			break;
+		}
+		if (!buf_done_status)
+			CAM_DBG(CAM_ISP,
+				"buf_done status:(%d),out_res->res_id: 0x%x",
+				buf_done_status, isp_ife_out_res->res_id);
+	}
+
+	return rc;
+
+err:
+	/*
+	 * Report for error if any.
+	 * For the first phase, Error is reported as overflow, for all
+	 * the affected context and any successful buf_done event is not
+	 * reported.
+	 */
+	rc = cam_ife_hw_mgr_find_affected_ctx(ife_hwr_mgr_ctx,
+		&error_event_data, evt_payload->core_index,
+		&recovery_data);
+
+	/*
+	 * We can temporarily return from here as
+	 * for the first phase, we are going to reset entire HW.
+	 */
+
+	CAM_DBG(CAM_ISP, "Exit buf_done_status Error = %d",
+		buf_done_status);
+	return rc;
+}
+
+int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv,
+	void *evt_payload_priv)
+{
+	struct cam_ife_hw_mgr_ctx               *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload      *evt_payload;
+	int rc = -EINVAL;
+
+	if (!handler_priv)
+		return rc;
+
+	evt_payload = evt_payload_priv;
+	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)evt_payload->ctx;
+
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core index:0x%x",
+		evt_payload, evt_payload->core_index);
+	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
+	CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
+		evt_payload->irq_reg_val[4]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
+		evt_payload->irq_reg_val[5]);
+	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
+		evt_payload->irq_reg_val[6]);
+	/* WM Done */
+	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+}
+
+int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv)
+{
+	struct cam_ife_hw_mgr_ctx            *ife_hwr_mgr_ctx = handler_priv;
+	struct cam_vfe_top_irq_evt_payload   *evt_payload;
+	int rc = -EINVAL;
+
+	if (!evt_payload_priv)
+		return rc;
+
+	evt_payload = evt_payload_priv;
+	if (!handler_priv)
+		return rc;
+
+	ife_hwr_mgr_ctx = (struct cam_ife_hw_mgr_ctx *)handler_priv;
+
+	CAM_DBG(CAM_ISP, "addr of evt_payload = %pK core_index:%d",
+		(void *)evt_payload,
+		evt_payload->core_index);
+	CAM_DBG(CAM_ISP, "irq_status_0: = %x", evt_payload->irq_reg_val[0]);
+	CAM_DBG(CAM_ISP, "irq_status_1: = %x", evt_payload->irq_reg_val[1]);
+	CAM_DBG(CAM_ISP, "Violation register: = %x",
+		evt_payload->irq_reg_val[2]);
+
+	/*
+	 * If an overflow/overwrite/error/violation is pending for this
+	 * context, it needs to be handled first and the remaining
+	 * interrupts are ignored.
+	 */
+	rc = cam_ife_hw_mgr_handle_camif_error(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"Encountered Error (%d), ignoring other irqs",
+			rc);
+		goto put_payload;
+	}
+
+	CAM_DBG(CAM_ISP, "Calling EOF");
+	cam_ife_hw_mgr_handle_eof_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CAM_DBG(CAM_ISP, "Calling SOF");
+	/* SOF IRQ */
+	cam_ife_hw_mgr_handle_sof(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CAM_DBG(CAM_ISP, "Calling RUP");
+	/* REG UPDATE */
+	cam_ife_hw_mgr_handle_reg_update(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+	CAM_DBG(CAM_ISP, "Calling EPOCH");
+	/* EPOCH IRQ */
+	cam_ife_hw_mgr_handle_epoch_for_camif_hw_res(ife_hwr_mgr_ctx,
+		evt_payload_priv);
+
+put_payload:
+	cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
+	return IRQ_HANDLED;
+}
+
+static int cam_ife_hw_mgr_sort_dev_with_caps(
+	struct cam_ife_hw_mgr *ife_hw_mgr)
+{
+	int i;
+
+	/* get caps for csid devices */
+	for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		if (!ife_hw_mgr->csid_devices[i])
+			continue;
+		if (ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps) {
+			ife_hw_mgr->csid_devices[i]->hw_ops.get_hw_caps(
+				ife_hw_mgr->csid_devices[i]->hw_priv,
+				&ife_hw_mgr->ife_csid_dev_caps[i],
+				sizeof(ife_hw_mgr->ife_csid_dev_caps[i]));
+		}
+	}
+
+	/* get caps for ife devices */
+	for (i = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		if (!ife_hw_mgr->ife_devices[i])
+			continue;
+		if (ife_hw_mgr->ife_devices[i]->hw_ops.get_hw_caps) {
+			ife_hw_mgr->ife_devices[i]->hw_ops.get_hw_caps(
+				ife_hw_mgr->ife_devices[i]->hw_priv,
+				&ife_hw_mgr->ife_dev_caps[i],
+				sizeof(ife_hw_mgr->ife_dev_caps[i]));
+		}
+	}
+
+	return 0;
+}
+
+static int cam_ife_set_csid_debug(void *data, u64 val)
+{
+	g_ife_hw_mgr.debug_cfg.csid_debug = val;
+	CAM_DBG(CAM_ISP, "Set CSID Debug value :%lld", val);
+	return 0;
+}
+
+static int cam_ife_get_csid_debug(void *data, u64 *val)
+{
+	*val = g_ife_hw_mgr.debug_cfg.csid_debug;
+	CAM_DBG(CAM_ISP, "Get CSID Debug value :%lld",
+		g_ife_hw_mgr.debug_cfg.csid_debug);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_csid_debug,
+	cam_ife_get_csid_debug,
+	cam_ife_set_csid_debug, "%16llu");
+
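+/*
+ * Illustrative debugfs usage (the "camera_ife" directory and the file
+ * names are created in cam_ife_hw_mgr_debug_register() below; the
+ * default /sys/kernel/debug mount point is assumed):
+ *
+ *	echo 0x1 > /sys/kernel/debug/camera_ife/ife_csid_debug
+ *	cat /sys/kernel/debug/camera_ife/ife_csid_debug
+ */
+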
+static int cam_ife_set_camif_debug(void *data, u64 val)
+{
+	g_ife_hw_mgr.debug_cfg.camif_debug = val;
+	CAM_DBG(CAM_ISP,
+		"Set camif enable_diag_sensor_status value :%lld", val);
+	return 0;
+}
+
+static int cam_ife_get_camif_debug(void *data, u64 *val)
+{
+	*val = g_ife_hw_mgr.debug_cfg.camif_debug;
+	CAM_DBG(CAM_ISP,
+		"Get camif enable_diag_sensor_status value :%lld",
+		g_ife_hw_mgr.debug_cfg.camif_debug);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_ife_camif_debug,
+	cam_ife_get_camif_debug,
+	cam_ife_set_camif_debug, "%16llu");
+
+static int cam_ife_hw_mgr_debug_register(void)
+{
+	g_ife_hw_mgr.debug_cfg.dentry = debugfs_create_dir("camera_ife",
+		NULL);
+
+	if (!g_ife_hw_mgr.debug_cfg.dentry) {
+		CAM_ERR(CAM_ISP, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_file("ife_csid_debug",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_ife_csid_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_ife_csid_debug");
+		goto err;
+	}
+
+	if (!debugfs_create_u32("enable_recovery",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry,
+		&g_ife_hw_mgr.debug_cfg.enable_recovery)) {
+		CAM_ERR(CAM_ISP, "failed to create enable_recovery");
+		goto err;
+	}
+
+	if (!debugfs_create_file("ife_camif_debug",
+		0644,
+		g_ife_hw_mgr.debug_cfg.dentry, NULL,
+		&cam_ife_camif_debug)) {
+		CAM_ERR(CAM_ISP, "failed to create cam_ife_camif_debug");
+		goto err;
+	}
+	g_ife_hw_mgr.debug_cfg.enable_recovery = 0;
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(g_ife_hw_mgr.debug_cfg.dentry);
+	return -ENOMEM;
+}
+
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl)
+{
+	int rc = -EFAULT;
+	int i, j;
+	struct cam_iommu_handle cdm_handles;
+	struct cam_ife_hw_mgr_ctx *ctx_pool;
+	struct cam_ife_hw_mgr_res *res_list_ife_out;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	memset(&g_ife_hw_mgr, 0, sizeof(g_ife_hw_mgr));
+
+	mutex_init(&g_ife_hw_mgr.ctx_mutex);
+
+	if (CAM_IFE_HW_NUM_MAX != CAM_IFE_CSID_HW_NUM_MAX) {
+		CAM_ERR(CAM_ISP, "CSID num is different than IFE num");
+		return -EINVAL;
+	}
+
+	/* fill ife hw intf information */
+	for (i = 0, j = 0; i < CAM_IFE_HW_NUM_MAX; i++) {
+		rc = cam_vfe_hw_init(&g_ife_hw_mgr.ife_devices[i], i);
+		if (!rc) {
+			struct cam_hw_info *vfe_hw =
+				(struct cam_hw_info *)
+				g_ife_hw_mgr.ife_devices[i]->hw_priv;
+			struct cam_hw_soc_info *soc_info = &vfe_hw->soc_info;
+
+			j++;
+
+			g_ife_hw_mgr.cdm_reg_map[i] = &soc_info->reg_map[0];
+			CAM_DBG(CAM_ISP,
+				"reg_map: mem base = %pK cam_base = 0x%llx",
+				(void __iomem *)soc_info->reg_map[0].mem_base,
+				(uint64_t) soc_info->reg_map[0].mem_cam_base);
+		} else {
+			g_ife_hw_mgr.cdm_reg_map[i] = NULL;
+		}
+	}
+	if (j == 0) {
+		CAM_ERR(CAM_ISP, "no valid IFE HW");
+		return -EINVAL;
+	}
+
+	/* fill csid hw intf information */
+	for (i = 0, j = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++) {
+		rc = cam_ife_csid_hw_init(&g_ife_hw_mgr.csid_devices[i], i);
+		if (!rc)
+			j++;
+	}
+	if (!j) {
+		CAM_ERR(CAM_ISP, "no valid IFE CSID HW");
+		return -EINVAL;
+	}
+
+	cam_ife_hw_mgr_sort_dev_with_caps(&g_ife_hw_mgr);
+
+	/* setup ife context list */
+	INIT_LIST_HEAD(&g_ife_hw_mgr.free_ctx_list);
+	INIT_LIST_HEAD(&g_ife_hw_mgr.used_ctx_list);
+
+	/*
+	 * For now, we only support one iommu handle. Later we will need
+	 * to set up more iommu handles for other use cases.
+	 * Also, we have to release them once we have deinit support.
+	 */
+	if (cam_smmu_get_handle("ife",
+		&g_ife_hw_mgr.mgr_common.img_iommu_hdl)) {
+		CAM_ERR(CAM_ISP, "Can not get iommu handle");
+		return -EINVAL;
+	}
+
+	if (cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		CAM_SMMU_ATTACH)) {
+		CAM_ERR(CAM_ISP, "Attach iommu handle failed.");
+		goto attach_fail;
+	}
+
+	if (cam_smmu_get_handle("cam-secure",
+		&g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure)) {
+		CAM_ERR(CAM_ISP, "Failed to get secure iommu handle");
+		goto secure_fail;
+	}
+
+	CAM_DBG(CAM_ISP, "iommu_handles: non-secure[0x%x], secure[0x%x]",
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
+
+	if (!cam_cdm_get_iommu_handle("ife", &cdm_handles)) {
+		CAM_DBG(CAM_ISP, "Successfully acquired the CDM iommu handles");
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = cdm_handles.non_secure;
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure =
+			cdm_handles.secure;
+	} else {
+		CAM_DBG(CAM_ISP, "Failed to acquire the CDM iommu handles");
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl = -1;
+		g_ife_hw_mgr.mgr_common.cmd_iommu_hdl_secure = -1;
+	}
+
+	atomic_set(&g_ife_hw_mgr.active_ctx_cnt, 0);
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		memset(&g_ife_hw_mgr.ctx_pool[i], 0,
+			sizeof(g_ife_hw_mgr.ctx_pool[i]));
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].list);
+
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_in.list);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_cid);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_csid);
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].res_list_ife_src);
+		ctx_pool = &g_ife_hw_mgr.ctx_pool[i];
+		for (j = 0; j < CAM_IFE_HW_OUT_RES_MAX; j++) {
+			res_list_ife_out = &ctx_pool->res_list_ife_out[j];
+			INIT_LIST_HEAD(&res_list_ife_out->list);
+		}
+
+		/* init context pool */
+		INIT_LIST_HEAD(&g_ife_hw_mgr.ctx_pool[i].free_res_list);
+		for (j = 0; j < CAM_IFE_HW_RES_POOL_MAX; j++) {
+			INIT_LIST_HEAD(
+				&g_ife_hw_mgr.ctx_pool[i].res_pool[j].list);
+			list_add_tail(
+				&g_ife_hw_mgr.ctx_pool[i].res_pool[j].list,
+				&g_ife_hw_mgr.ctx_pool[i].free_res_list);
+		}
+
+		g_ife_hw_mgr.ctx_pool[i].cdm_cmd =
+			kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+				((CAM_IFE_HW_ENTRIES_MAX - 1) *
+				 sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+		if (!g_ife_hw_mgr.ctx_pool[i].cdm_cmd) {
+			rc = -ENOMEM;
+			CAM_ERR(CAM_ISP, "Allocation Failed for cdm command");
+			goto end;
+		}
+
+		g_ife_hw_mgr.ctx_pool[i].ctx_index = i;
+		g_ife_hw_mgr.ctx_pool[i].hw_mgr = &g_ife_hw_mgr;
+
+		cam_tasklet_init(&g_ife_hw_mgr.mgr_common.tasklet_pool[i],
+			&g_ife_hw_mgr.ctx_pool[i], i);
+		g_ife_hw_mgr.ctx_pool[i].common.tasklet_info =
+			g_ife_hw_mgr.mgr_common.tasklet_pool[i];
+
+		init_completion(&g_ife_hw_mgr.ctx_pool[i].config_done_complete);
+		list_add_tail(&g_ife_hw_mgr.ctx_pool[i].list,
+			&g_ife_hw_mgr.free_ctx_list);
+	}
+
+	/* Create Worker for ife_hw_mgr with 10 tasks */
+	rc = cam_req_mgr_workq_create("cam_ife_worker", 10,
+			&g_ife_hw_mgr.workq, CRM_WORKQ_USAGE_NON_IRQ, 0);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Unable to create worker");
+		goto end;
+	}
+
+	/* fill return structure */
+	hw_mgr_intf->hw_mgr_priv = &g_ife_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_ife_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_ife_mgr_acquire;
+	hw_mgr_intf->hw_start = cam_ife_mgr_start_hw;
+	hw_mgr_intf->hw_stop = cam_ife_mgr_stop_hw;
+	hw_mgr_intf->hw_read = cam_ife_mgr_read;
+	hw_mgr_intf->hw_write = cam_ife_mgr_write;
+	hw_mgr_intf->hw_release = cam_ife_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_ife_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_ife_mgr_config_hw;
+	hw_mgr_intf->hw_cmd = cam_ife_mgr_cmd;
+
+	if (iommu_hdl)
+		*iommu_hdl = g_ife_hw_mgr.mgr_common.img_iommu_hdl;
+
+	cam_ife_hw_mgr_debug_register();
+	CAM_DBG(CAM_ISP, "Exit");
+
+	return 0;
+end:
+	if (rc) {
+		for (i = 0; i < CAM_CTX_MAX; i++) {
+			cam_tasklet_deinit(
+				&g_ife_hw_mgr.mgr_common.tasklet_pool[i]);
+			kfree(g_ife_hw_mgr.ctx_pool[i].cdm_cmd);
+			g_ife_hw_mgr.ctx_pool[i].cdm_cmd = NULL;
+			g_ife_hw_mgr.ctx_pool[i].common.tasklet_info = NULL;
+		}
+	}
+	cam_smmu_destroy_handle(
+		g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl_secure = -1;
+secure_fail:
+	cam_smmu_ops(g_ife_hw_mgr.mgr_common.img_iommu_hdl,
+		CAM_SMMU_DETACH);
+attach_fail:
+	cam_smmu_destroy_handle(g_ife_hw_mgr.mgr_common.img_iommu_hdl);
+	g_ife_hw_mgr.mgr_common.img_iommu_hdl = -1;
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
new file mode 100644
index 0000000..2cdfa52
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_HW_MGR_H_
+#define _CAM_IFE_HW_MGR_H_
+
+#include <linux/completion.h>
+#include "cam_isp_hw_mgr.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_tasklet_util.h"
+
+/* enum cam_ife_hw_mgr_res_type - manager resource node type */
+enum cam_ife_hw_mgr_res_type {
+	CAM_IFE_HW_MGR_RES_UNINIT,
+	CAM_IFE_HW_MGR_RES_ROOT,
+	CAM_IFE_HW_MGR_RES_CID,
+	CAM_IFE_HW_MGR_RES_CSID,
+	CAM_IFE_HW_MGR_RES_IFE_SRC,
+	CAM_IFE_HW_MGR_RES_IFE_OUT,
+};
+
+/* IFE resource constants */
+#define CAM_IFE_HW_IN_RES_MAX            (CAM_ISP_IFE_IN_RES_MAX & 0xFF)
+#define CAM_IFE_HW_OUT_RES_MAX           (CAM_ISP_IFE_OUT_RES_MAX & 0xFF)
+#define CAM_IFE_HW_RES_POOL_MAX          64
+
+/**
+ * struct cam_ife_hw_mgr_res - HW resources for the IFE manager
+ *
+ * @list:                used by the resource list
+ * @res_type:            IFE manager resource type
+ * @res_id:              resource id based on the resource type for root or
+ *                       leaf resource, it matches the KMD interface port id.
+ *                       For branch resource, it is defined by the ISP HW
+ *                       layer
+ * @is_dual_vfe:         flag to indicate whether dual VFE is used for this
+ *                       resource
+ * @hw_res:              hw layer resource array. For single VFE, only one VFE
+ *                       hw resource will be acquired. For dual VFE, two hw
+ *                       resources from different VFE HW devices will be
+ *                       acquired
+ * @parent:              pointer to the parent resource node
+ * @child:               pointers to the children resource nodes
+ * @num_children:        number of child resource nodes
+ * @is_secure:           informs whether the resource is in secure mode or not
+ *
+ */
+struct cam_ife_hw_mgr_res {
+	struct list_head                 list;
+	enum cam_ife_hw_mgr_res_type     res_type;
+	uint32_t                         res_id;
+	uint32_t                         is_dual_vfe;
+	struct cam_isp_resource_node    *hw_res[CAM_ISP_HW_SPLIT_MAX];
+
+	/* graph */
+	struct cam_ife_hw_mgr_res       *parent;
+	struct cam_ife_hw_mgr_res       *child[CAM_IFE_HW_OUT_RES_MAX];
+	uint32_t                         num_children;
+	uint32_t                         is_secure;
+};
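+
+/*
+ * Illustrative example (field values assumed): a dual-VFE CAMIF source
+ * resource would carry res_type = CAM_IFE_HW_MGR_RES_IFE_SRC,
+ * is_dual_vfe = 1 and both hw_res[CAM_ISP_HW_SPLIT_LEFT] and
+ * hw_res[CAM_ISP_HW_SPLIT_RIGHT] populated, while a single-VFE RDI
+ * source would set is_dual_vfe = 0 and fill only
+ * hw_res[CAM_ISP_HW_SPLIT_LEFT].
+ */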
+
+/**
+ * struct ctx_base_info - Base hardware information for the context
+ *
+ * @idx:                 Base resource index
+ * @split_id:            Split info for the base resource
+ *
+ */
+struct ctx_base_info {
+	uint32_t                       idx;
+	enum cam_isp_hw_split_id       split_id;
+};
+
+/**
+ * struct cam_ife_hw_mgr_debug - contain the debug information
+ *
+ * @dentry:                    Debugfs entry
+ * @csid_debug:                csid debug information
+ * @enable_recovery:           enable recovery
+ * @enable_diag_sensor_status: enable sensor diagnosis status
+ *
+ */
+struct cam_ife_hw_mgr_debug {
+	struct dentry  *dentry;
+	uint64_t       csid_debug;
+	uint32_t       enable_recovery;
+	uint32_t       camif_debug;
+};
+
+/**
+ * struct cam_ife_hw_mgr_ctx - IFE HW manager context object
+ *
+ * @list:                   used by the ctx list.
+ * @common:                 common acquired context data
+ * @ctx_index:              acquired context id.
+ * @hw_mgr:                 IFE hw mgr which owns this context
+ * @ctx_in_use:             flag to tell whether context is active
+ * @res_list_ife_in:        Starting resource (TPG, PHY0, PHY1...); there can
+ *                          only be one.
+ * @res_list_ife_cid:       CID resource list
+ * @res_list_ife_csid:      CSID resource list
+ * @res_list_ife_src:       IFE input resource list
+ * @res_list_ife_out:       IFE output resources array
+ * @free_res_list:          Free resources list for the branch node
+ * @res_pool:               memory storage for the free resource list
+ * @irq_status0_mask:       irq_status0_mask for the context
+ * @irq_status1_mask:       irq_status1_mask for the context
+ * @base:                   device base index array containing all IFE HW
+ *                          instances associated with this context
+ * @num_base:               number of valid base data in the base array
+ * @cdm_handle:             cdm hw acquire handle
+ * @cdm_ops:                cdm util operation pointer for building
+ *                          cdm commands
+ * @cdm_cmd:                cdm base and length request pointer
+ * @sof_cnt:                sof count value per core, used for dual VFE
+ * @epoch_cnt:              epoch count value per core, used for dual VFE
+ * @eof_cnt:                eof count value per core, used for dual VFE
+ * @overflow_pending:       flag to specify that an overflow is pending for
+ *                          the context
+ * @is_rdi_only_context:    flag to specify the context has only rdi resource
+ * @config_done_complete:   indicator for configuration complete
+ * @init_done:              indicates whether init hw is done
+ */
+struct cam_ife_hw_mgr_ctx {
+	struct list_head                list;
+	struct cam_isp_hw_mgr_ctx       common;
+
+	uint32_t                        ctx_index;
+	struct cam_ife_hw_mgr          *hw_mgr;
+	uint32_t                        ctx_in_use;
+
+	struct cam_ife_hw_mgr_res       res_list_ife_in;
+	struct list_head                res_list_ife_cid;
+	struct list_head                res_list_ife_csid;
+	struct list_head                res_list_ife_src;
+	struct cam_ife_hw_mgr_res       res_list_ife_out[
+						CAM_IFE_HW_OUT_RES_MAX];
+
+	struct list_head                free_res_list;
+	struct cam_ife_hw_mgr_res       res_pool[CAM_IFE_HW_RES_POOL_MAX];
+
+	uint32_t                        irq_status0_mask[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        irq_status1_mask[CAM_IFE_HW_NUM_MAX];
+	struct ctx_base_info            base[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        num_base;
+	uint32_t                        cdm_handle;
+	struct cam_cdm_utils_ops       *cdm_ops;
+	struct cam_cdm_bl_request      *cdm_cmd;
+
+	uint32_t                        sof_cnt[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        epoch_cnt[CAM_IFE_HW_NUM_MAX];
+	uint32_t                        eof_cnt[CAM_IFE_HW_NUM_MAX];
+	atomic_t                        overflow_pending;
+	uint32_t                        is_rdi_only_context;
+	struct completion               config_done_complete;
+	bool                            init_done;
+};
+
+/**
+ * struct cam_ife_hw_mgr - IFE HW Manager
+ *
+ * @mgr_common:            common data for all HW managers
+ * @csid_devices:          csid device instances array. This will be filled by
+ *                         HW manager during the initialization.
+ * @ife_devices:           IFE device instances array. This will be filled by
+ *                         HW layer during initialization
+ * @cdm_reg_map:           cdm register map per IFE HW instance
+ * @ctx_mutex:             mutex for the hw context pool
+ * @active_ctx_cnt:        number of currently active contexts
+ * @free_ctx_list:         free hw context list
+ * @used_ctx_list:         used hw context list
+ * @ctx_pool:              context storage
+ * @ife_csid_dev_caps:     csid device capability stored per core
+ * @ife_dev_caps:          ife device capability per core
+ * @workq:                 work queue for IFE hw manager
+ * @debug_cfg:             debug configuration
+ */
+struct cam_ife_hw_mgr {
+	struct cam_isp_hw_mgr          mgr_common;
+	struct cam_hw_intf            *csid_devices[CAM_IFE_CSID_HW_NUM_MAX];
+	struct cam_hw_intf            *ife_devices[CAM_IFE_HW_NUM_MAX];
+	struct cam_soc_reg_map        *cdm_reg_map[CAM_IFE_HW_NUM_MAX];
+
+	struct mutex                   ctx_mutex;
+	atomic_t                       active_ctx_cnt;
+	struct list_head               free_ctx_list;
+	struct list_head               used_ctx_list;
+	struct cam_ife_hw_mgr_ctx      ctx_pool[CAM_CTX_MAX];
+
+	struct cam_ife_csid_hw_caps    ife_csid_dev_caps[
+						CAM_IFE_CSID_HW_NUM_MAX];
+	struct cam_vfe_hw_get_hw_cap   ife_dev_caps[CAM_IFE_HW_NUM_MAX];
+	struct cam_req_mgr_core_workq *workq;
+	struct cam_ife_hw_mgr_debug    debug_cfg;
+};
+
+/**
+ * cam_ife_hw_mgr_init()
+ *
+ * @brief:              Initialize the IFE hardware manager. This is the
+ *                      entry function for the IFE HW manager.
+ *
+ * @hw_mgr_intf:        IFE hardware manager object returned
+ * @iommu_hdl:          Iommu handle to be returned
+ *
+ */
+int cam_ife_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf, int *iommu_hdl);
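+
+/*
+ * Illustrative call sequence (hedged sketch, not part of this patch): a
+ * hypothetical ISP module setup path could wire up the IFE HW manager as
+ * below; the local variable names are placeholders.
+ *
+ *	struct cam_hw_mgr_intf hw_mgr_intf = {0};
+ *	int iommu_hdl = -1;
+ *	int rc;
+ *
+ *	rc = cam_ife_hw_mgr_init(&hw_mgr_intf, &iommu_hdl);
+ *	if (rc)
+ *		return rc;
+ */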
+
+/**
+ * cam_ife_mgr_do_tasklet_buf_done()
+ *
+ * @brief:              Main tasklet handle function for the buf done event
+ *
+ * @handler_priv:       Tasklet information handle
+ * @evt_payload_priv:   Event payload for the handler function
+ *
+ */
+int cam_ife_mgr_do_tasklet_buf_done(void *handler_priv, void *evt_payload_priv);
+
+/**
+ * cam_ife_mgr_do_tasklet()
+ *
+ * @brief:              Main tasklet handle function for mux resource events
+ *
+ * @handler_priv:       Tasklet information handle
+ * @evt_payload_priv:   Event payload for the handler function
+ *
+ */
+int cam_ife_mgr_do_tasklet(void *handler_priv, void *evt_payload_priv);
+
+#endif /* _CAM_IFE_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
new file mode 100644
index 0000000..c3ab60c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_debug_util.h"
+
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl)
+{
+	int rc = 0;
+	const char *compat_str = NULL;
+
+	rc = of_property_read_string_index(of_node, "arch-compat", 0,
+		(const char **)&compat_str);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to read arch-compat rc=%d", rc);
+		return rc;
+	}
+
+	if (strnstr(compat_str, "ife", strlen(compat_str))) {
+		rc = cam_ife_hw_mgr_init(hw_mgr, iommu_hdl);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid ISP hw type");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
new file mode 100644
index 0000000..e6c058e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_isp_hw_mgr.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_H_
+#define _CAM_ISP_HW_MGR_H_
+
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_tasklet_util.h"
+
+#define CAM_ISP_HW_NUM_MAX                       4
+
+/**
+ * struct cam_isp_hw_mgr_ctx - common acquired context for managers
+ *
+ * @tasklet_info:          associated tasklet
+ * @event_cb:              call back interface to ISP context. Set during
+ *                         acquire device
+ * @cb_priv:               first argument for the call back function
+ *                         set during acquire device
+ *
+ */
+struct cam_isp_hw_mgr_ctx {
+	void                           *tasklet_info;
+	cam_hw_event_cb_func            event_cb[CAM_ISP_HW_EVENT_MAX];
+	void                           *cb_priv;
+};
+
+/**
+ * struct cam_isp_hw_mgr - ISP HW Manager common object
+ *
+ * @tasklet_pool:             Tasklet pool
+ * @img_iommu_hdl:            iommu memory handle for regular image buffer
+ * @img_iommu_hdl_secure:     iommu memory handle for secure image buffer
+ * @cmd_iommu_hdl:            iommu memory handle for regular command buffer
+ * @cmd_iommu_hdl_secure:     iommu memory handle for secure command buffer
+ * @scratch_buf_range:        scratch buffer range (not for IFE)
+ * @scratch_buf_addr:         scratch buffer address (not for IFE)
+ *
+ */
+struct cam_isp_hw_mgr {
+	void                           *tasklet_pool[CAM_CTX_MAX];
+	int                             img_iommu_hdl;
+	int                             img_iommu_hdl_secure;
+	int                             cmd_iommu_hdl;
+	int                             cmd_iommu_hdl_secure;
+	uint32_t                        scratch_buf_range;
+	dma_addr_t                      scratch_buf_addr;
+};
+
+/**
+ * struct cam_hw_event_recovery_data - Payload for the recovery procedure
+ *
+ * @error_type:               Error type that causes the recovery
+ * @affected_core:            Array of the hardware cores that are affected
+ * @affected_ctx:             Array of the hardware contexts that are affected
+ * @no_of_context:            Actual number of affected contexts
+ *
+ */
+struct cam_hw_event_recovery_data {
+	uint32_t                   error_type;
+	uint32_t                   affected_core[CAM_ISP_HW_NUM_MAX];
+	struct cam_ife_hw_mgr_ctx *affected_ctx[CAM_CTX_MAX];
+	uint32_t                   no_of_context;
+};
+#endif /* _CAM_ISP_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
new file mode 100644
index 0000000..79b3e15
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_tasklet_util.o cam_isp_packet_parser.o
+obj-$(CONFIG_SPECTRA_CAMERA) += irq_controller/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
new file mode 100644
index 0000000..98f56fa
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_isp_packet_parser.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <uapi/media/cam_defs.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_mem_mgr.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_isp_packet_parser.h"
+#include "cam_debug_util.h"
+
+int cam_isp_add_change_base(
+	struct cam_hw_prepare_update_args      *prepare,
+	struct list_head                       *res_list_isp_src,
+	uint32_t                                base_idx,
+	struct cam_kmd_buf_info                *kmd_buf_info)
+{
+	int rc = -EINVAL;
+	struct cam_ife_hw_mgr_res       *hw_mgr_res;
+	struct cam_isp_resource_node    *res;
+	struct cam_isp_hw_get_cmd_update get_base;
+	struct cam_hw_update_entry      *hw_entry;
+	uint32_t                         num_ent, i;
+
+	hw_entry = prepare->hw_update_entries;
+	num_ent = prepare->num_hw_update_entries;
+
+	/* Max one HW entry required for each base */
+	if (num_ent + 1 >= prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %d %d",
+			num_ent, prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
+		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+			if (res->hw_intf->hw_idx != base_idx)
+				continue;
+
+			get_base.res  = res;
+			get_base.cmd_type = CAM_ISP_HW_CMD_GET_CHANGE_BASE;
+			get_base.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes/4;
+			get_base.cmd.size  = kmd_buf_info->size -
+					kmd_buf_info->used_bytes;
+
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_GET_CHANGE_BASE, &get_base,
+				sizeof(struct cam_isp_hw_get_cmd_update));
+			if (rc)
+				return rc;
+
+			hw_entry[num_ent].handle = kmd_buf_info->handle;
+			hw_entry[num_ent].len    = get_base.cmd.used_bytes;
+			hw_entry[num_ent].offset = kmd_buf_info->offset;
+			CAM_DBG(CAM_ISP,
+				"num_ent=%d handle=0x%x, len=%u, offset=%u",
+				num_ent,
+				hw_entry[num_ent].handle,
+				hw_entry[num_ent].len,
+				hw_entry[num_ent].offset);
+
+			kmd_buf_info->used_bytes += get_base.cmd.used_bytes;
+			kmd_buf_info->offset     += get_base.cmd.used_bytes;
+			num_ent++;
+			prepare->num_hw_update_entries = num_ent;
+
+			/* return success */
+			return 0;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_isp_update_dual_config(
+	struct cam_hw_prepare_update_args  *prepare,
+	struct cam_cmd_buf_desc            *cmd_desc,
+	uint32_t                            split_id,
+	uint32_t                            base_idx,
+	struct cam_ife_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out)
+{
+	int rc = -EINVAL;
+	struct cam_isp_dual_config                 *dual_config;
+	struct cam_ife_hw_mgr_res                  *hw_mgr_res;
+	struct cam_isp_resource_node               *res;
+	struct cam_isp_hw_dual_isp_update_args      dual_isp_update_args;
+	uint32_t                                    outport_id;
+	uint32_t                                    ports_plane_idx;
+	size_t                                      len = 0;
+	uint32_t                                   *cpu_addr;
+	uint32_t                                    i, j;
+
+	CAM_DBG(CAM_UTIL, "cmd des size %d, length: %d",
+		cmd_desc->size, cmd_desc->length);
+
+	rc = cam_packet_util_get_cmd_mem_addr(
+		cmd_desc->mem_handle, &cpu_addr, &len);
+	if (rc)
+		return rc;
+
+	cpu_addr += (cmd_desc->offset / 4);
+	dual_config = (struct cam_isp_dual_config *)cpu_addr;
+
+	for (i = 0; i < dual_config->num_ports; i++) {
+
+		if (i >= CAM_ISP_IFE_OUT_RES_MAX) {
+			CAM_ERR(CAM_UTIL,
+				"failed update for i:%d > size_isp_out:%d",
+				i, size_isp_out);
+			return -EINVAL;
+		}
+
+		hw_mgr_res = &res_list_isp_out[i];
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			res = hw_mgr_res->hw_res[j];
+
+			if (res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+				res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+				continue;
+
+			outport_id = res->res_id & 0xFF;
+
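+			/*
+			 * Stripe entries are laid out split-major as
+			 * [split][port][plane]; index the first plane of
+			 * this out port for the current split j.
+			 */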
+			ports_plane_idx = (j * (dual_config->num_ports *
+				CAM_PACKET_MAX_PLANES)) +
+				(outport_id * CAM_PACKET_MAX_PLANES);
+
+			if (dual_config->stripes[ports_plane_idx].port_id == 0)
+				continue;
+
+			dual_isp_update_args.split_id = j;
+			dual_isp_update_args.res      = res;
+			dual_isp_update_args.dual_cfg = dual_config;
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_STRIPE_UPDATE,
+				&dual_isp_update_args,
+				sizeof(struct cam_isp_hw_dual_isp_update_args));
+			if (rc)
+				return rc;
+		}
+	}
+
+	return rc;
+}
+
+int cam_isp_add_cmd_buf_update(
+	struct cam_ife_hw_mgr_res            *hw_mgr_res,
+	uint32_t                              cmd_type,
+	uint32_t                              hw_cmd_type,
+	uint32_t                              base_idx,
+	uint32_t                             *cmd_buf_addr,
+	uint32_t                              kmd_buf_remain_size,
+	void                                 *cmd_update_data,
+	uint32_t                             *bytes_used)
+{
+	int rc = 0;
+	struct cam_isp_resource_node       *res;
+	struct cam_isp_hw_get_cmd_update    cmd_update;
+	uint32_t                            i;
+	uint32_t                            total_used_bytes = 0;
+
+	if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+		CAM_ERR(CAM_ISP, "io res id:%d not valid",
+			hw_mgr_res->res_type);
+		return -EINVAL;
+	}
+
+	cmd_update.cmd_type = hw_cmd_type;
+	cmd_update.cmd.cmd_buf_addr = cmd_buf_addr;
+	cmd_update.cmd.size = kmd_buf_remain_size;
+	cmd_update.cmd.used_bytes = 0;
+	cmd_update.data = cmd_update_data;
+	CAM_DBG(CAM_ISP, "cmd_type %u cmd buffer 0x%pK, size %d",
+		cmd_update.cmd_type,
+		cmd_update.cmd.cmd_buf_addr,
+		cmd_update.cmd.size);
+
+	for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+		if (!hw_mgr_res->hw_res[i])
+			continue;
+
+		if (hw_mgr_res->hw_res[i]->hw_intf->hw_idx != base_idx)
+			continue;
+
+		res = hw_mgr_res->hw_res[i];
+		cmd_update.res = res;
+
+		rc = res->hw_intf->hw_ops.process_cmd(
+			res->hw_intf->hw_priv,
+			cmd_update.cmd_type, &cmd_update,
+			sizeof(struct cam_isp_hw_get_cmd_update));
+
+		if (rc) {
+			CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+				res->res_id);
+			rc = -ENOMEM;
+			return rc;
+		}
+
+		total_used_bytes += cmd_update.cmd.used_bytes;
+	}
+	*bytes_used = total_used_bytes;
+	CAM_DBG(CAM_ISP, "total_used_bytes %u", total_used_bytes);
+	return rc;
+}
+
+int cam_isp_add_command_buffers(
+	struct cam_hw_prepare_update_args  *prepare,
+	struct cam_kmd_buf_info            *kmd_buf_info,
+	struct ctx_base_info               *base_info,
+	cam_packet_generic_blob_handler     blob_handler_cb,
+	struct cam_ife_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out)
+{
+	int rc = 0;
+	uint32_t                           cmd_meta_data, num_ent, i;
+	uint32_t                           base_idx;
+	enum cam_isp_hw_split_id           split_id;
+	struct cam_cmd_buf_desc           *cmd_desc = NULL;
+	struct cam_hw_update_entry        *hw_entry;
+
+	hw_entry = prepare->hw_update_entries;
+	split_id = base_info->split_id;
+	base_idx = base_info->idx;
+
+	/*
+	 * set the cmd_desc to point the first command descriptor in the
+	 * packet
+	 */
+	cmd_desc = (struct cam_cmd_buf_desc *)
+			((uint8_t *)&prepare->packet->payload +
+			prepare->packet->cmd_buf_offset);
+
+	CAM_DBG(CAM_ISP, "split id = %d, number of command buffers:%d",
+		split_id, prepare->packet->num_cmd_buf);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		num_ent = prepare->num_hw_update_entries;
+		if (!cmd_desc[i].length)
+			continue;
+
+		/* One hw entry space required for left or right or common */
+		if (num_ent + 1 >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_ISP, "Insufficient  HW entries :%d %d",
+				num_ent, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc)
+			return rc;
+
+		cmd_meta_data = cmd_desc[i].meta_data;
+
+		CAM_DBG(CAM_ISP, "meta type: %d, split_id: %d",
+			cmd_meta_data, split_id);
+
+		switch (cmd_meta_data) {
+		case CAM_ISP_PACKET_META_BASE:
+		case CAM_ISP_PACKET_META_LEFT:
+		case CAM_ISP_PACKET_META_DMI_LEFT:
+			if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+				hw_entry[num_ent].len = cmd_desc[i].length;
+				hw_entry[num_ent].handle =
+					cmd_desc[i].mem_handle;
+				hw_entry[num_ent].offset = cmd_desc[i].offset;
+				CAM_DBG(CAM_ISP,
+					"Meta_Left num_ent=%d handle=0x%x, len=%u, offset=%u",
+					num_ent,
+					hw_entry[num_ent].handle,
+					hw_entry[num_ent].len,
+					hw_entry[num_ent].offset);
+
+				if (cmd_meta_data ==
+					CAM_ISP_PACKET_META_DMI_LEFT)
+					hw_entry[num_ent].flags = 0x1;
+
+				num_ent++;
+			}
+			break;
+		case CAM_ISP_PACKET_META_RIGHT:
+		case CAM_ISP_PACKET_META_DMI_RIGHT:
+			if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+				hw_entry[num_ent].len = cmd_desc[i].length;
+				hw_entry[num_ent].handle =
+					cmd_desc[i].mem_handle;
+				hw_entry[num_ent].offset = cmd_desc[i].offset;
+				CAM_DBG(CAM_ISP,
+					"Meta_Right num_ent=%d handle=0x%x, len=%u, offset=%u",
+					num_ent,
+					hw_entry[num_ent].handle,
+					hw_entry[num_ent].len,
+					hw_entry[num_ent].offset);
+
+				if (cmd_meta_data ==
+					CAM_ISP_PACKET_META_DMI_RIGHT)
+					hw_entry[num_ent].flags = 0x1;
+				num_ent++;
+			}
+			break;
+		case CAM_ISP_PACKET_META_COMMON:
+		case CAM_ISP_PACKET_META_DMI_COMMON:
+			hw_entry[num_ent].len = cmd_desc[i].length;
+			hw_entry[num_ent].handle =
+				cmd_desc[i].mem_handle;
+			hw_entry[num_ent].offset = cmd_desc[i].offset;
+			CAM_DBG(CAM_ISP,
+				"Meta_Common num_ent=%d handle=0x%x, len=%u, offset=%u",
+				num_ent,
+				hw_entry[num_ent].handle,
+				hw_entry[num_ent].len,
+				hw_entry[num_ent].offset);
+			if (cmd_meta_data == CAM_ISP_PACKET_META_DMI_COMMON)
+				hw_entry[num_ent].flags = 0x1;
+
+			num_ent++;
+			break;
+		case CAM_ISP_PACKET_META_DUAL_CONFIG:
+			rc = cam_isp_update_dual_config(prepare,
+				&cmd_desc[i], split_id, base_idx,
+				res_list_isp_out, size_isp_out);
+
+			if (rc)
+				return rc;
+			break;
+		case CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT:
+			if (split_id == CAM_ISP_HW_SPLIT_LEFT) {
+				struct cam_isp_generic_blob_info   blob_info;
+
+				prepare->num_hw_update_entries = num_ent;
+				blob_info.prepare = prepare;
+				blob_info.base_info = base_info;
+				blob_info.kmd_buf_info = kmd_buf_info;
+
+				rc = cam_packet_util_process_generic_cmd_buffer(
+					&cmd_desc[i],
+					blob_handler_cb,
+					&blob_info);
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"Failed in processing blobs %d",
+						rc);
+					return rc;
+				}
+				num_ent = prepare->num_hw_update_entries;
+			}
+			break;
+		case CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT:
+			if (split_id == CAM_ISP_HW_SPLIT_RIGHT) {
+				struct cam_isp_generic_blob_info   blob_info;
+
+				prepare->num_hw_update_entries = num_ent;
+				blob_info.prepare = prepare;
+				blob_info.base_info = base_info;
+				blob_info.kmd_buf_info = kmd_buf_info;
+
+				rc = cam_packet_util_process_generic_cmd_buffer(
+					&cmd_desc[i],
+					blob_handler_cb,
+					&blob_info);
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"Failed in processing blobs %d",
+						rc);
+					return rc;
+				}
+				num_ent = prepare->num_hw_update_entries;
+			}
+			break;
+		case CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON: {
+			struct cam_isp_generic_blob_info   blob_info;
+
+			prepare->num_hw_update_entries = num_ent;
+			blob_info.prepare = prepare;
+			blob_info.base_info = base_info;
+			blob_info.kmd_buf_info = kmd_buf_info;
+
+			rc = cam_packet_util_process_generic_cmd_buffer(
+				&cmd_desc[i],
+				blob_handler_cb,
+				&blob_info);
+			if (rc) {
+				CAM_ERR(CAM_ISP,
+					"Failed in processing blobs %d", rc);
+				return rc;
+			}
+			num_ent = prepare->num_hw_update_entries;
+		}
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "invalid cdm command meta data %d",
+				cmd_meta_data);
+			return -EINVAL;
+		}
+		prepare->num_hw_update_entries = num_ent;
+	}
+
+	return rc;
+}
+
+int cam_isp_add_io_buffers(
+	int                                   iommu_hdl,
+	int                                   sec_iommu_hdl,
+	struct cam_hw_prepare_update_args    *prepare,
+	uint32_t                              base_idx,
+	struct cam_kmd_buf_info              *kmd_buf_info,
+	struct cam_ife_hw_mgr_res            *res_list_isp_out,
+	uint32_t                              size_isp_out,
+	bool                                  fill_fence)
+{
+	int rc = 0;
+	uint64_t                            io_addr[CAM_PACKET_MAX_PLANES];
+	struct cam_buf_io_cfg              *io_cfg;
+	struct cam_isp_resource_node       *res;
+	struct cam_ife_hw_mgr_res          *hw_mgr_res;
+	struct cam_isp_hw_get_cmd_update    update_buf;
+	struct cam_isp_hw_get_wm_update     wm_update;
+	struct cam_hw_fence_map_entry      *out_map_entries;
+	struct cam_hw_fence_map_entry      *in_map_entries;
+	uint32_t                            kmd_buf_remain_size;
+	uint32_t                            i, j, num_out_buf, num_in_buf;
+	uint32_t                            res_id_out, res_id_in, plane_id;
+	uint32_t                            io_cfg_used_bytes, num_ent;
+	size_t                              size;
+	int32_t                             hdl;
+	int                                 mmu_hdl;
+	bool                                mode, is_buf_secure;
+
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+			&prepare->packet->payload +
+			prepare->packet->io_configs_offset);
+	num_out_buf = 0;
+	num_in_buf  = 0;
+	io_cfg_used_bytes = 0;
+	prepare->pf_data->packet = prepare->packet;
+
+	/* Max one HW entry required for each base */
+	if (prepare->num_hw_update_entries + 1 >=
+			prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %d %d",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_ISP, "======= io config idx %d ============", i);
+		CAM_DBG(CAM_REQ,
+			"i %d req_id %llu resource_type:%d fence:%d direction %d",
+			i, prepare->packet->header.request_id,
+			io_cfg[i].resource_type, io_cfg[i].fence,
+			io_cfg[i].direction);
+		CAM_DBG(CAM_ISP, "format: %d", io_cfg[i].format);
+
+		if (io_cfg[i].direction == CAM_BUF_OUTPUT) {
+			res_id_out = io_cfg[i].resource_type & 0xFF;
+			if (res_id_out >= size_isp_out) {
+				CAM_ERR(CAM_ISP, "invalid out restype:%x",
+					io_cfg[i].resource_type);
+				return -EINVAL;
+			}
+
+			CAM_DBG(CAM_ISP,
+				"configure output io with fill fence %d",
+				fill_fence);
+			out_map_entries =
+				&prepare->out_map_entries[num_out_buf];
+			if (fill_fence) {
+				if (num_out_buf <
+					prepare->max_out_map_entries) {
+					out_map_entries->resource_handle =
+						io_cfg[i].resource_type;
+					out_map_entries->sync_id =
+						io_cfg[i].fence;
+					num_out_buf++;
+				} else {
+					CAM_ERR(CAM_ISP, "ln_out:%d max_ln:%d",
+						num_out_buf,
+						prepare->max_out_map_entries);
+					return -EINVAL;
+				}
+			}
+
+			hw_mgr_res = &res_list_isp_out[res_id_out];
+			if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT) {
+				CAM_ERR(CAM_ISP, "io res id:%d not valid",
+					io_cfg[i].resource_type);
+				return -EINVAL;
+			}
+		} else if (io_cfg[i].direction == CAM_BUF_INPUT) {
+			res_id_in = io_cfg[i].resource_type & 0xFF;
+			CAM_DBG(CAM_ISP,
+				"configure input io with fill fence %d",
+				fill_fence);
+			in_map_entries =
+				&prepare->in_map_entries[num_in_buf];
+			if (fill_fence) {
+				if (num_in_buf < prepare->max_in_map_entries) {
+					in_map_entries->resource_handle =
+						io_cfg[i].resource_type;
+					in_map_entries->sync_id =
+						io_cfg[i].fence;
+					num_in_buf++;
+				} else {
+					CAM_ERR(CAM_ISP, "ln_in:%d imax_ln:%d",
+						num_in_buf,
+						prepare->max_in_map_entries);
+					return -EINVAL;
+				}
+			}
+			continue;
+		} else {
+			CAM_ERR(CAM_ISP, "Invalid io config direction :%d",
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_ISP, "setup mem io");
+		for (j = 0; j < CAM_ISP_HW_SPLIT_MAX; j++) {
+			if (!hw_mgr_res->hw_res[j])
+				continue;
+
+			if (hw_mgr_res->hw_res[j]->hw_intf->hw_idx != base_idx)
+				continue;
+
+			res = hw_mgr_res->hw_res[j];
+			if (res->res_id != io_cfg[i].resource_type) {
+				CAM_ERR(CAM_ISP,
+					"wm err res id:%d io res id:%d",
+					res->res_id, io_cfg[i].resource_type);
+				return -EINVAL;
+			}
+
+			memset(io_addr, 0, sizeof(io_addr));
+
+			for (plane_id = 0; plane_id < CAM_PACKET_MAX_PLANES;
+						plane_id++) {
+				if (!io_cfg[i].mem_handle[plane_id])
+					break;
+
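+				/*
+				 * Choose the IOMMU handle that matches the
+				 * port's secure mode; a port/buffer secure
+				 * mode mismatch is rejected below.
+				 */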
+				hdl = io_cfg[i].mem_handle[plane_id];
+				if (res->process_cmd(res,
+						CAM_ISP_HW_CMD_GET_SECURE_MODE,
+						&mode,
+						sizeof(bool)))
+					return -EINVAL;
+
+				is_buf_secure = cam_mem_is_secure_buf(hdl);
+				if ((mode == CAM_SECURE_MODE_SECURE) &&
+					is_buf_secure) {
+					mmu_hdl = sec_iommu_hdl;
+				} else if (
+					(mode == CAM_SECURE_MODE_NON_SECURE) &&
+					(!is_buf_secure)) {
+					mmu_hdl = iommu_hdl;
+				} else {
+					CAM_ERR_RATE_LIMIT(CAM_ISP,
+						"Invalid hdl: port mode[%u], buf mode[%u]",
+						mode, is_buf_secure);
+					return -EINVAL;
+				}
+
+				rc = cam_mem_get_io_buf(
+					io_cfg[i].mem_handle[plane_id],
+					mmu_hdl, &io_addr[plane_id], &size);
+				if (rc) {
+					CAM_ERR(CAM_ISP,
+						"no io addr for plane%d",
+						plane_id);
+					rc = -ENOMEM;
+					return rc;
+				}
+
+				/* apply the per-plane offset from io config */
+				io_addr[plane_id] +=
+						io_cfg[i].offsets[plane_id];
+				CAM_DBG(CAM_ISP,
+					"get io_addr for plane %d: 0x%llx, mem_hdl=0x%x",
+					plane_id, io_addr[plane_id],
+					io_cfg[i].mem_handle[plane_id]);
+
+				CAM_DBG(CAM_ISP,
+					"mmu_hdl=0x%x, size=%d, end=0x%x",
+					mmu_hdl, (int)size,
+					io_addr[plane_id]+size);
+
+			}
+			if (!plane_id) {
+				CAM_ERR(CAM_ISP, "No valid planes for res%d",
+					res->res_id);
+				rc = -ENOMEM;
+				return rc;
+			}
+
+			if ((kmd_buf_info->used_bytes + io_cfg_used_bytes) <
+				kmd_buf_info->size) {
+				kmd_buf_remain_size = kmd_buf_info->size -
+					(kmd_buf_info->used_bytes +
+					io_cfg_used_bytes);
+			} else {
+				CAM_ERR(CAM_ISP,
+					"no free kmd memory for base %d",
+					base_idx);
+				rc = -ENOMEM;
+				return rc;
+			}
+			update_buf.res = res;
+			update_buf.cmd_type = CAM_ISP_HW_CMD_GET_BUF_UPDATE;
+			update_buf.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes/4 +
+					io_cfg_used_bytes/4;
+			wm_update.image_buf = io_addr;
+			wm_update.num_buf   = plane_id;
+			wm_update.io_cfg    = &io_cfg[i];
+			update_buf.cmd.size = kmd_buf_remain_size;
+			update_buf.wm_update = &wm_update;
+
+			CAM_DBG(CAM_ISP, "cmd buffer 0x%pK, size %d",
+				update_buf.cmd.cmd_buf_addr,
+				update_buf.cmd.size);
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_GET_BUF_UPDATE, &update_buf,
+				sizeof(struct cam_isp_hw_get_cmd_update));
+
+			if (rc) {
+				CAM_ERR(CAM_ISP, "get buf cmd error:%d",
+					res->res_id);
+				rc = -ENOMEM;
+				return rc;
+			}
+			io_cfg_used_bytes += update_buf.cmd.used_bytes;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "io_cfg_used_bytes %d, fill_fence %d",
+		io_cfg_used_bytes, fill_fence);
+	if (io_cfg_used_bytes) {
+		/* Update the HW entries */
+		num_ent = prepare->num_hw_update_entries;
+		prepare->hw_update_entries[num_ent].handle =
+					kmd_buf_info->handle;
+		prepare->hw_update_entries[num_ent].len = io_cfg_used_bytes;
+		prepare->hw_update_entries[num_ent].offset =
+			kmd_buf_info->offset;
+		CAM_DBG(CAM_ISP,
+			"num_ent=%d handle=0x%x, len=%u, offset=%u",
+			num_ent,
+			prepare->hw_update_entries[num_ent].handle,
+			prepare->hw_update_entries[num_ent].len,
+			prepare->hw_update_entries[num_ent].offset);
+		num_ent++;
+
+		kmd_buf_info->used_bytes += io_cfg_used_bytes;
+		kmd_buf_info->offset     += io_cfg_used_bytes;
+		prepare->num_hw_update_entries = num_ent;
+	}
+
+	if (fill_fence) {
+		prepare->num_out_map_entries = num_out_buf;
+		prepare->num_in_map_entries  = num_in_buf;
+	}
+
+	return rc;
+}
+
+int cam_isp_add_reg_update(
+	struct cam_hw_prepare_update_args    *prepare,
+	struct list_head                     *res_list_isp_src,
+	uint32_t                              base_idx,
+	struct cam_kmd_buf_info              *kmd_buf_info)
+{
+	int rc = -EINVAL;
+	struct cam_isp_resource_node         *res;
+	struct cam_ife_hw_mgr_res            *hw_mgr_res;
+	struct cam_hw_update_entry           *hw_entry;
+	struct cam_isp_hw_get_cmd_update      get_regup;
+	uint32_t kmd_buf_remain_size, num_ent, i, reg_update_size;
+
+	hw_entry = prepare->hw_update_entries;
+	/* Max one HW entry required for each base */
+	if (prepare->num_hw_update_entries + 1 >=
+				prepare->max_hw_update_entries) {
+		CAM_ERR(CAM_ISP, "Insufficient HW entries: %d %d",
+			prepare->num_hw_update_entries,
+			prepare->max_hw_update_entries);
+		return -EINVAL;
+	}
+
+	reg_update_size = 0;
+	list_for_each_entry(hw_mgr_res, res_list_isp_src, list) {
+		if (hw_mgr_res->res_type == CAM_IFE_HW_MGR_RES_UNINIT)
+			continue;
+
+		for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+			if (!hw_mgr_res->hw_res[i])
+				continue;
+
+			res = hw_mgr_res->hw_res[i];
+			if (res->hw_intf->hw_idx != base_idx)
+				continue;
+
+			if (kmd_buf_info->size > (kmd_buf_info->used_bytes +
+				reg_update_size)) {
+				kmd_buf_remain_size =  kmd_buf_info->size -
+					(kmd_buf_info->used_bytes +
+					reg_update_size);
+			} else {
+				CAM_ERR(CAM_ISP, "no free mem %d %d %d",
+					base_idx, kmd_buf_info->size,
+					kmd_buf_info->used_bytes +
+					reg_update_size);
+				rc = -EINVAL;
+				return rc;
+			}
+
+			get_regup.cmd.cmd_buf_addr = kmd_buf_info->cpu_addr +
+				kmd_buf_info->used_bytes/4 +
+				reg_update_size/4;
+			get_regup.cmd.size = kmd_buf_remain_size;
+			get_regup.cmd_type = CAM_ISP_HW_CMD_GET_REG_UPDATE;
+			get_regup.res = res;
+
+			rc = res->hw_intf->hw_ops.process_cmd(
+				res->hw_intf->hw_priv,
+				CAM_ISP_HW_CMD_GET_REG_UPDATE, &get_regup,
+				sizeof(struct cam_isp_hw_get_cmd_update));
+			if (rc)
+				return rc;
+
+			CAM_DBG(CAM_ISP, "Reg update added for res %d hw_id %d",
+				res->res_id, res->hw_intf->hw_idx);
+			reg_update_size += get_regup.cmd.used_bytes;
+		}
+	}
+
+	if (reg_update_size) {
+		/* Update the HW entries */
+		num_ent = prepare->num_hw_update_entries;
+		prepare->hw_update_entries[num_ent].handle =
+					kmd_buf_info->handle;
+		prepare->hw_update_entries[num_ent].len = reg_update_size;
+		prepare->hw_update_entries[num_ent].offset =
+			kmd_buf_info->offset;
+		CAM_DBG(CAM_ISP,
+			"num_ent=%d handle=0x%x, len=%u, offset=%u",
+			num_ent,
+			prepare->hw_update_entries[num_ent].handle,
+			prepare->hw_update_entries[num_ent].len,
+			prepare->hw_update_entries[num_ent].offset);
+		num_ent++;
+
+		kmd_buf_info->used_bytes += reg_update_size;
+		kmd_buf_info->offset     += reg_update_size;
+		prepare->num_hw_update_entries = num_ent;
+		/* reg update succeeded; return status 0 */
+		rc = 0;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
new file mode 100644
index 0000000..a29804f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/cam_tasklet_util.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+#include "cam_tasklet_util.h"
+#include "cam_irq_controller.h"
+#include "cam_debug_util.h"
+
+#define CAM_TASKLETQ_SIZE              256
+
+static void cam_tasklet_action(unsigned long data);
+
+/**
+ * struct cam_tasklet_queue_cmd:
+ * @Brief:                  Structure associated with each slot in the
+ *                          tasklet queue
+ *
+ * @list:                   list_head member for each entry in queue
+ * @payload:                Payload structure for the event. This will be
+ *                          passed to the handler function
+ * @bottom_half_handler:    Function pointer for event handler in bottom
+ *                          half context
+ *
+ */
+struct cam_tasklet_queue_cmd {
+	struct list_head                   list;
+	void                              *payload;
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler;
+};
+
+/**
+ * struct cam_tasklet_info:
+ * @Brief:                  Tasklet private structure
+ *
+ * @list:                   list_head member for each tasklet
+ * @index:                  Instance id for the tasklet
+ * @tasklet_lock:           Spin lock
+ * @tasklet_active:         Atomic variable to control tasklet state
+ * @tasklet:                Tasklet structure used to schedule bottom half
+ * @free_cmd_list:          List of free tasklet queue cmd for use
+ * @used_cmd_list:          List of used tasklet queue cmd
+ * @cmd_queue:              Array of tasklet cmd for storage
+ * @ctx_priv:               Private data passed to the handling function
+ *
+ */
+struct cam_tasklet_info {
+	struct list_head                   list;
+	uint32_t                           index;
+	spinlock_t                         tasklet_lock;
+	atomic_t                           tasklet_active;
+	struct tasklet_struct              tasklet;
+
+	struct list_head                   free_cmd_list;
+	struct list_head                   used_cmd_list;
+	struct cam_tasklet_queue_cmd       cmd_queue[CAM_TASKLETQ_SIZE];
+
+	void                              *ctx_priv;
+};
+
+struct cam_irq_bh_api tasklet_bh_api = {
+	.bottom_half_enqueue_func = cam_tasklet_enqueue_cmd,
+	.get_bh_payload_func = cam_tasklet_get_cmd,
+	.put_bh_payload_func = cam_tasklet_put_cmd,
+};
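+
+/*
+ * Hedged usage sketch: an IRQ controller is expected to grab a free cmd
+ * slot in top-half context, fill its payload, and enqueue it for bottom
+ * half handling, roughly as below (all names besides the hooks are
+ * placeholders):
+ *
+ *	void *bh_cmd = NULL;
+ *
+ *	if (!tasklet_bh_api.get_bh_payload_func(tasklet, &bh_cmd))
+ *		tasklet_bh_api.bottom_half_enqueue_func(tasklet, bh_cmd,
+ *			handler_priv, evt_payload, bottom_half_handler);
+ */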
+
+int cam_tasklet_get_cmd(
+	void                         *bottom_half,
+	void                        **bh_cmd)
+{
+	int           rc = 0;
+	unsigned long flags;
+	struct cam_tasklet_info        *tasklet = bottom_half;
+	struct cam_tasklet_queue_cmd   *tasklet_cmd = NULL;
+
+	*bh_cmd = NULL;
+
+	if (tasklet == NULL) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "tasklet is NULL");
+		return -EINVAL;
+	}
+
+	if (!atomic_read(&tasklet->tasklet_active)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active");
+		rc = -EPIPE;
+		return rc;
+	}
+
+	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+	if (list_empty(&tasklet->free_cmd_list)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No more free tasklet cmd");
+		rc = -ENODEV;
+		goto spin_unlock;
+	} else {
+		tasklet_cmd = list_first_entry(&tasklet->free_cmd_list,
+			struct cam_tasklet_queue_cmd, list);
+		list_del_init(&(tasklet_cmd)->list);
+		*bh_cmd = tasklet_cmd;
+	}
+
+spin_unlock:
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+	return rc;
+}
+
+void cam_tasklet_put_cmd(
+	void                         *bottom_half,
+	void                        **bh_cmd)
+{
+	unsigned long flags;
+	struct cam_tasklet_info        *tasklet = bottom_half;
+	struct cam_tasklet_queue_cmd   *tasklet_cmd = *bh_cmd;
+
+	if (tasklet == NULL) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "tasklet is NULL");
+		return;
+	}
+
+	if (tasklet_cmd == NULL) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid tasklet_cmd");
+		return;
+	}
+
+	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+	list_del_init(&tasklet_cmd->list);
+	list_add_tail(&tasklet_cmd->list, &tasklet->free_cmd_list);
+	*bh_cmd = NULL;
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+}
+
+/**
+ * cam_tasklet_dequeue_cmd()
+ *
+ * @brief:              Dequeue a cmd from the used list of the tasklet
+ *
+ * @tasklet:            Tasklet info structure to dequeue from
+ * @tasklet_cmd:        Dequeued tasklet cmd pointer, set if the used list
+ *                      is not empty
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+static int cam_tasklet_dequeue_cmd(
+	struct cam_tasklet_info        *tasklet,
+	struct cam_tasklet_queue_cmd  **tasklet_cmd)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	*tasklet_cmd = NULL;
+
+	CAM_DBG(CAM_ISP, "Dequeue before lock.");
+	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+	if (list_empty(&tasklet->used_cmd_list)) {
+		CAM_DBG(CAM_ISP, "End of list reached. Exit");
+		rc = -ENODEV;
+		goto spin_unlock;
+	} else {
+		*tasklet_cmd = list_first_entry(&tasklet->used_cmd_list,
+			struct cam_tasklet_queue_cmd, list);
+		list_del_init(&(*tasklet_cmd)->list);
+		CAM_DBG(CAM_ISP, "Dequeue Successful");
+	}
+
+spin_unlock:
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+	return rc;
+}
+
+void cam_tasklet_enqueue_cmd(
+	void                              *bottom_half,
+	void                              *bh_cmd,
+	void                              *handler_priv,
+	void                              *evt_payload_priv,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler)
+{
+	unsigned long                  flags;
+	struct cam_tasklet_queue_cmd  *tasklet_cmd = bh_cmd;
+	struct cam_tasklet_info       *tasklet = bottom_half;
+
+	if (!bottom_half) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "NULL bottom half");
+		return;
+	}
+
+	if (!bh_cmd) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "NULL bh cmd");
+		return;
+	}
+
+	if (!atomic_read(&tasklet->tasklet_active)) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Tasklet is not active\n");
+		return;
+	}
+
+	CAM_DBG(CAM_ISP, "Enqueue tasklet cmd");
+	tasklet_cmd->bottom_half_handler = bottom_half_handler;
+	tasklet_cmd->payload = evt_payload_priv;
+	spin_lock_irqsave(&tasklet->tasklet_lock, flags);
+	list_add_tail(&tasklet_cmd->list,
+		&tasklet->used_cmd_list);
+	spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
+	tasklet_schedule(&tasklet->tasklet);
+}
+
+int cam_tasklet_init(
+	void                    **tasklet_info,
+	void                     *hw_mgr_ctx,
+	uint32_t                  idx)
+{
+	int i;
+	struct cam_tasklet_info  *tasklet = NULL;
+
+	tasklet = kzalloc(sizeof(struct cam_tasklet_info), GFP_KERNEL);
+	if (!tasklet) {
+		CAM_DBG(CAM_ISP,
+			"Error! Unable to allocate memory for tasklet");
+		*tasklet_info = NULL;
+		return -ENOMEM;
+	}
+
+	tasklet->ctx_priv = hw_mgr_ctx;
+	tasklet->index = idx;
+	spin_lock_init(&tasklet->tasklet_lock);
+	memset(tasklet->cmd_queue, 0, sizeof(tasklet->cmd_queue));
+	INIT_LIST_HEAD(&tasklet->free_cmd_list);
+	INIT_LIST_HEAD(&tasklet->used_cmd_list);
+	for (i = 0; i < CAM_TASKLETQ_SIZE; i++) {
+		INIT_LIST_HEAD(&tasklet->cmd_queue[i].list);
+		list_add_tail(&tasklet->cmd_queue[i].list,
+			&tasklet->free_cmd_list);
+	}
+	tasklet_init(&tasklet->tasklet, cam_tasklet_action,
+		(unsigned long)tasklet);
+	tasklet_disable(&tasklet->tasklet);
+
+	*tasklet_info = tasklet;
+
+	return 0;
+}
+
+void cam_tasklet_deinit(void    **tasklet_info)
+{
+	struct cam_tasklet_info *tasklet = *tasklet_info;
+
+	if (atomic_read(&tasklet->tasklet_active)) {
+		atomic_set(&tasklet->tasklet_active, 0);
+		tasklet_kill(&tasklet->tasklet);
+		tasklet_disable(&tasklet->tasklet);
+	}
+	kfree(tasklet);
+	*tasklet_info = NULL;
+}
+
+static inline void cam_tasklet_flush(struct cam_tasklet_info *tasklet_info)
+{
+	cam_tasklet_action((unsigned long) tasklet_info);
+}
+
+int cam_tasklet_start(void  *tasklet_info)
+{
+	struct cam_tasklet_info       *tasklet = tasklet_info;
+	int i = 0;
+
+	if (atomic_read(&tasklet->tasklet_active)) {
+		CAM_ERR(CAM_ISP, "Tasklet already active. idx = %d",
+			tasklet->index);
+		return -EBUSY;
+	}
+
+	/* clean up the command queue first */
+	for (i = 0; i < CAM_TASKLETQ_SIZE; i++) {
+		list_del_init(&tasklet->cmd_queue[i].list);
+		list_add_tail(&tasklet->cmd_queue[i].list,
+			&tasklet->free_cmd_list);
+	}
+
+	atomic_set(&tasklet->tasklet_active, 1);
+
+	tasklet_enable(&tasklet->tasklet);
+
+	return 0;
+}
+
+void cam_tasklet_stop(void  *tasklet_info)
+{
+	struct cam_tasklet_info  *tasklet = tasklet_info;
+
+	atomic_set(&tasklet->tasklet_active, 0);
+	tasklet_kill(&tasklet->tasklet);
+	tasklet_disable(&tasklet->tasklet);
+	cam_tasklet_flush(tasklet);
+}
+
+/*
+ * cam_tasklet_action()
+ *
+ * @brief:              Process function that will be called when tasklet runs
+ *                      on CPU
+ *
+ * @data:               Tasklet Info structure that is passed in tasklet_init
+ *
+ * @return:             Void
+ */
+static void cam_tasklet_action(unsigned long data)
+{
+	struct cam_tasklet_info          *tasklet_info = NULL;
+	struct cam_tasklet_queue_cmd     *tasklet_cmd = NULL;
+
+	tasklet_info = (struct cam_tasklet_info *)data;
+
+	while (!cam_tasklet_dequeue_cmd(tasklet_info, &tasklet_cmd)) {
+		tasklet_cmd->bottom_half_handler(tasklet_info->ctx_priv,
+			tasklet_cmd->payload);
+		cam_tasklet_put_cmd(tasklet_info, (void **)(&tasklet_cmd));
+	}
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
new file mode 100644
index 0000000..05d8461
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_isp_packet_parser.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ISP_HW_PARSER_H_
+#define _CAM_ISP_HW_PARSER_H_
+
+#include <linux/types.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_hw_intf.h"
+#include "cam_packet_util.h"
+
+/*
+ * struct cam_isp_generic_blob_info
+ *
+ * @prepare:            Payload for prepare command
+ * @base_info:          Base hardware information for the context
+ * @kmd_buf_info:       Kmd buffer to store the custom cmd data
+ */
+struct cam_isp_generic_blob_info {
+	struct cam_hw_prepare_update_args     *prepare;
+	struct ctx_base_info                  *base_info;
+	struct cam_kmd_buf_info               *kmd_buf_info;
+};
+
+/*
+ * cam_isp_add_change_base()
+ *
+ * @brief                  Add change base in the hw entries list.
+ *                         Processes the isp source list and gets the change
+ *                         base from the ISP HW instance
+ *
+ * @prepare:               Contains the packet and HW update variables
+ * @res_list_isp_src:      Resource list for IFE/VFE source
+ * @base_idx:              Base or dev index of the IFE/VFE HW instance for
+ *                         which the change base needs to be added
+ * @kmd_buf_info:          Kmd buffer to store the change base command
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_add_change_base(
+	struct cam_hw_prepare_update_args     *prepare,
+	struct list_head                      *res_list_isp_src,
+	uint32_t                               base_idx,
+	struct cam_kmd_buf_info               *kmd_buf_info);
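+
+/*
+ * Illustrative prepare-path ordering (hedged sketch): the IFE HW manager
+ * is expected to call the helpers in this header once per base index,
+ * roughly as below; the ctx and kmd_buf names are placeholders.
+ *
+ *	for (i = 0; i < ctx->num_base; i++) {
+ *		rc = cam_isp_add_change_base(prepare,
+ *			&ctx->res_list_ife_src, ctx->base[i].idx, &kmd_buf);
+ *		rc = cam_isp_add_command_buffers(prepare, &kmd_buf,
+ *			&ctx->base[i], blob_handler_cb,
+ *			ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX);
+ *		rc = cam_isp_add_io_buffers(iommu_hdl, sec_iommu_hdl,
+ *			prepare, ctx->base[i].idx, &kmd_buf,
+ *			ctx->res_list_ife_out, CAM_IFE_HW_OUT_RES_MAX,
+ *			fill_fence);
+ *	}
+ */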
+
+/*
+ * cam_isp_add_cmd_buf_update()
+ *
+ * @brief                  Add command buffer in the HW entries list for given
+ *                         Blob Data.
+ *
+ * @hw_mgr_res:            HW resource to get the update from
+ * @cmd_type:              Cmd type to get update for
+ * @hw_cmd_type:           HW Cmd type corresponding to cmd_type
+ * @base_idx:              Base hardware index
+ * @cmd_buf_addr:          Cpu buf addr of kmd scratch buffer
+ * @kmd_buf_remain_size:   Remaining size left for cmd buffer update
+ * @cmd_update_data:       Data needed by HW to process the cmd and provide
+ *                         cmd buffer
+ * @bytes_used:            Address of the field to be populated with
+ *                         total bytes used as output to caller
+ *
+ * @return:                0 for success, negative for failure; the total
+ *                         bytes used is returned through @bytes_used
+ */
+int cam_isp_add_cmd_buf_update(
+	struct cam_ife_hw_mgr_res            *hw_mgr_res,
+	uint32_t                              cmd_type,
+	uint32_t                              hw_cmd_type,
+	uint32_t                              base_idx,
+	uint32_t                             *cmd_buf_addr,
+	uint32_t                              kmd_buf_remain_size,
+	void                                 *cmd_update_data,
+	uint32_t                             *bytes_used);
+
+/*
+ * cam_isp_add_command_buffers()
+ *
+ * @brief                  Add command buffer in the HW entries list for given
+ *                         left or right VFE/IFE instance.
+ *
+ * @prepare:               Contains the packet and HW update variables
+ * @kmd_buf_info:          KMD buffer to store the custom cmd data
+ * @base_info:             base hardware information
+ * @blob_handler_cb:       Call_back_function for Meta handling
+ * @res_list_isp_out:      IFE /VFE out resource list
+ * @size_isp_out:          Size of the res_list_isp_out array
+ *
+ * @return:                0 for success
+ *                         Negative for Failure
+ */
+int cam_isp_add_command_buffers(
+	struct cam_hw_prepare_update_args  *prepare,
+	struct cam_kmd_buf_info            *kmd_buf_info,
+	struct ctx_base_info               *base_info,
+	cam_packet_generic_blob_handler     blob_handler_cb,
+	struct cam_ife_hw_mgr_res          *res_list_isp_out,
+	uint32_t                            size_isp_out);
+
+/*
+ * cam_isp_add_io_buffers()
+ *
+ * @brief                  Add io buffer configurations in the HW entries list
+ *                         Processes the io configurations based on the base
+ *                         index and updates the HW entries list
+ *
+ * @iommu_hdl:             Iommu handle to get the IO buf from memory manager
+ * @sec_iommu_hdl:         Secure iommu handle to get the IO buf from
+ *                         memory manager
+ * @prepare:               Contains the packet and HW update variables
+ * @base_idx:              Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info:          Kmd buffer to store the io config commands
+ * @res_list_isp_out:      IFE /VFE out resource list
+ * @size_isp_out:          Size of the res_list_isp_out array
+ * @fill_fence:            If true, Fence map table will be filled
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_add_io_buffers(
+	int                                   iommu_hdl,
+	int                                   sec_iommu_hdl,
+	struct cam_hw_prepare_update_args    *prepare,
+	uint32_t                              base_idx,
+	struct cam_kmd_buf_info              *kmd_buf_info,
+	struct cam_ife_hw_mgr_res            *res_list_isp_out,
+	uint32_t                              size_isp_out,
+	bool                                  fill_fence);
+
+/*
+ * cam_isp_add_reg_update()
+ *
+ * @brief                  Add reg update in the hw entries list
+ *                         Processes the isp source list and gets the reg
+ *                         update from the ISP HW instance
+ *
+ * @prepare:               Contains the packet and HW update variables
+ * @res_list_isp_src:      Resource list for IFE/VFE source
+ * @base_idx:              Base or dev index of the IFE/VFE HW instance
+ * @kmd_buf_info:          Kmd buffer to store the reg update command
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_isp_add_reg_update(
+	struct cam_hw_prepare_update_args    *prepare,
+	struct list_head                     *res_list_isp_src,
+	uint32_t                              base_idx,
+	struct cam_kmd_buf_info              *kmd_buf_info);
+
+#endif /* _CAM_ISP_HW_PARSER_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h
new file mode 100644
index 0000000..35c4a09
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include/cam_tasklet_util.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_TASKLET_UTIL_H_
+#define _CAM_TASKLET_UTIL_H_
+
+#include "cam_irq_controller.h"
+
+/*
+ * cam_tasklet_init()
+ *
+ * @brief:              Initialize the tasklet info structure
+ *
+ * @tasklet:            Tasklet to initialize
+ * @hw_mgr_ctx:         Private Ctx data that will be passed to the handler
+ *                      function
+ * @idx:                Index of tasklet used as identity
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_tasklet_init(
+	void                   **tasklet,
+	void                    *hw_mgr_ctx,
+	uint32_t                 idx);
+
+/*
+ * cam_tasklet_deinit()
+ *
+ * @brief:              Deinitialize the tasklet info structure
+ *
+ * @tasklet:            Tasklet to deinitialize
+ *
+ * @return:             Void
+ */
+void cam_tasklet_deinit(void   **tasklet);
+
+/*
+ * cam_tasklet_start()
+ *
+ * @brief:              Enable the tasklet to be scheduled and run.
+ *                      Caller should make sure this function is called
+ *                      before trying to enqueue.
+ *
+ * @tasklet:            Tasklet to start
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_tasklet_start(void    *tasklet);
+
+/*
+ * cam_tasklet_stop()
+ *
+ * @brief:              Disable the tasklet so it can no longer be scheduled.
+ *                      Need to enable again to run.
+ *
+ * @tasklet:            Tasklet to stop
+ *
+ * @return:             Void
+ */
+void cam_tasklet_stop(void    *tasklet);
+
+/*
+ * cam_tasklet_enqueue_cmd()
+ *
+ * @brief:               Enqueue the tasklet_cmd to used list
+ *
+ * @bottom_half:         Tasklet info to enqueue onto
+ * @bh_cmd:              Tasklet cmd used to enqueue task
+ * @handler_priv:        Private Handler data that will be passed to the
+ *                       handler function
+ * @evt_payload_priv:    Event payload that will be passed to the handler
+ *                       function
+ * @bottom_half_handler: Callback function that will be called by tasklet
+ *                       for handling event
+ *
+ * @return:              Void
+ */
+void cam_tasklet_enqueue_cmd(
+	void                              *bottom_half,
+	void                              *bh_cmd,
+	void                              *handler_priv,
+	void                              *evt_payload_priv,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler);
+
+/**
+ * cam_tasklet_get_cmd()
+ *
+ * @brief:              Get free cmd from tasklet
+ *
+ * @bottom_half:        Tasklet Info structure to get cmd from
+ * @bh_cmd:             Return tasklet_cmd pointer if successful
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_tasklet_get_cmd(void *bottom_half, void **bh_cmd);
+
+/**
+ * cam_tasklet_put_cmd()
+ *
+ * @brief:              Put back cmd to free list
+ *
+ * @bottom_half:        Tasklet Info structure to put cmd into
+ * @bh_cmd:             tasklet_cmd pointer that needs to be put back
+ *
+ * @return:             Void
+ */
+void cam_tasklet_put_cmd(void *bottom_half, void **bh_cmd);
+
+extern struct cam_irq_bh_api tasklet_bh_api;
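+
+/*
+ * Illustrative lifecycle (hedged sketch): a HW manager context would
+ * typically drive this API as below; hw_mgr_ctx and ctx_index are
+ * placeholders.
+ *
+ *	void *tasklet = NULL;
+ *
+ *	rc = cam_tasklet_init(&tasklet, hw_mgr_ctx, ctx_index);
+ *	rc = cam_tasklet_start(tasklet);
+ *	... IRQ top halves enqueue work through tasklet_bh_api ...
+ *	cam_tasklet_stop(tasklet);
+ *	cam_tasklet_deinit(&tasklet);
+ */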
+
+#endif /* _CAM_TASKLET_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile
new file mode 100644
index 0000000..6014b8c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_irq_controller.o
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
new file mode 100644
index 0000000..2be388b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.c
@@ -0,0 +1,730 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/ratelimit.h>
+
+#include "cam_io_util.h"
+#include "cam_irq_controller.h"
+#include "cam_debug_util.h"
+
+/**
+ * struct cam_irq_evt_handler:
+ * @Brief:                  Event handler information
+ *
+ * @priority:               Priority level of this event
+ * @evt_bit_mask_arr:       evt_bit_mask that has the bits set for IRQs to
+ *                          subscribe for
+ * @handler_priv:           Private data that will be passed to the Top/Bottom
+ *                          Half handler function
+ * @top_half_handler:       Top half Handler callback function
+ * @bottom_half_handler:    Bottom half Handler callback function
+ * @bottom_half:            Pointer to bottom_half implementation on which to
+ *                          enqueue the event for further handling
+ * @irq_bh_api:             Bottom half function hooks used to get, enqueue
+ *                          and put the bottom_half event
+ * @list_node:              list_head struct used for overall handler List
+ * @th_list_node:           list_head struct used for top half handler List
+ * @index:                  Unique id assigned to this handler on subscribe
+ */
+struct cam_irq_evt_handler {
+	enum cam_irq_priority_level        priority;
+	uint32_t                          *evt_bit_mask_arr;
+	void                              *handler_priv;
+	CAM_IRQ_HANDLER_TOP_HALF           top_half_handler;
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler;
+	void                              *bottom_half;
+	struct cam_irq_bh_api              irq_bh_api;
+	struct list_head                   list_node;
+	struct list_head                   th_list_node;
+	int                                index;
+};
+
+/**
+ * struct cam_irq_register_obj:
+ * @Brief:                  Structure containing information related to
+ *                          a particular register Set
+ *
+ * @index:                  Index of set in Array
+ * @mask_reg_offset:        Offset of IRQ MASK register
+ * @clear_reg_offset:       Offset of IRQ CLEAR register
+ * @status_reg_offset:      Offset of IRQ STATUS register
+ * @top_half_enable_mask:   Array of enabled bit_mask sorted by priority
+ */
+struct cam_irq_register_obj {
+	uint32_t                     index;
+	uint32_t                     mask_reg_offset;
+	uint32_t                     clear_reg_offset;
+	uint32_t                     status_reg_offset;
+	uint32_t                     top_half_enable_mask[CAM_IRQ_PRIORITY_MAX];
+};
+
+/**
+ * struct cam_irq_controller:
+ *
+ * @brief:                  IRQ Controller structure.
+ *
+ * @name:                   Name of IRQ Controller block
+ * @mem_base:               Mapped base address of register space to which
+ *                          register offsets are added to access registers
+ * @num_registers:          Number of sets(mask/clear/status) of IRQ registers
+ * @irq_register_arr:       Array of Register object associated with this
+ *                          Controller
+ * @irq_status_arr:         Array of IRQ Status values
+ * @global_clear_offset:    Offset of Global IRQ Clear register. This register
+ *                          contains the BIT that needs to be set for the CLEAR
+ *                          to take effect
+ * @global_clear_bitmask:   Bitmask needed to be used in Global Clear register
+ *                          for Clear IRQ cmd to take effect
+ * @evt_handler_list_head:  List of all event handlers
+ * @th_list_head:           List of handlers sorted by priority
+ * @hdl_idx:                Unique identity of handler assigned on Subscribe.
+ *                          Used to Unsubscribe.
+ * @lock:                   Lock for use by controller
+ * @th_payload:             Payload structure used when invoking handlers
+ */
+struct cam_irq_controller {
+	const char                     *name;
+	void __iomem                   *mem_base;
+	uint32_t                        num_registers;
+	struct cam_irq_register_obj    *irq_register_arr;
+	uint32_t                       *irq_status_arr;
+	uint32_t                        global_clear_offset;
+	uint32_t                        global_clear_bitmask;
+	struct list_head                evt_handler_list_head;
+	struct list_head                th_list_head[CAM_IRQ_PRIORITY_MAX];
+	uint32_t                        hdl_idx;
+	spinlock_t                      lock;
+	struct cam_irq_th_payload       th_payload;
+};
+
+int cam_irq_controller_deinit(void **irq_controller)
+{
+	struct cam_irq_controller *controller = *irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+
+	while (!list_empty(&controller->evt_handler_list_head)) {
+		evt_handler = list_first_entry(
+			&controller->evt_handler_list_head,
+			struct cam_irq_evt_handler, list_node);
+		list_del_init(&evt_handler->list_node);
+		kfree(evt_handler->evt_bit_mask_arr);
+		kfree(evt_handler);
+	}
+
+	kfree(controller->th_payload.evt_status_arr);
+	kfree(controller->irq_status_arr);
+	kfree(controller->irq_register_arr);
+	kfree(controller);
+	*irq_controller = NULL;
+	return 0;
+}
+
+int cam_irq_controller_init(const char       *name,
+	void __iomem                         *mem_base,
+	struct cam_irq_controller_reg_info   *register_info,
+	void                                **irq_controller)
+{
+	struct cam_irq_controller *controller = NULL;
+	int i, rc = 0;
+
+	*irq_controller = NULL;
+
+	if (!name || !mem_base || !register_info ||
+		!register_info->num_registers || !register_info->irq_reg_set) {
+		CAM_ERR(CAM_IRQ_CTRL, "Invalid parameters");
+		rc = -EINVAL;
+		return rc;
+	}
+
+	controller = kzalloc(sizeof(struct cam_irq_controller), GFP_KERNEL);
+	if (!controller) {
+		CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ Controller");
+		return -ENOMEM;
+	}
+
+	controller->irq_register_arr = kcalloc(register_info->num_registers,
+		sizeof(struct cam_irq_register_obj), GFP_KERNEL);
+	if (!controller->irq_register_arr) {
+		CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ register Arr");
+		rc = -ENOMEM;
+		goto reg_alloc_error;
+	}
+
+	controller->irq_status_arr = kcalloc(register_info->num_registers,
+		sizeof(uint32_t), GFP_KERNEL);
+	if (!controller->irq_status_arr) {
+		CAM_DBG(CAM_IRQ_CTRL, "Failed to allocate IRQ status Arr");
+		rc = -ENOMEM;
+		goto status_alloc_error;
+	}
+
+	controller->th_payload.evt_status_arr =
+		kcalloc(register_info->num_registers, sizeof(uint32_t),
+		GFP_KERNEL);
+	if (!controller->th_payload.evt_status_arr) {
+		CAM_DBG(CAM_IRQ_CTRL,
+			"Failed to allocate BH payload bit mask Arr");
+		rc = -ENOMEM;
+		goto evt_mask_alloc_error;
+	}
+
+	controller->name = name;
+
+	CAM_DBG(CAM_IRQ_CTRL, "num_registers: %d",
+		register_info->num_registers);
+	for (i = 0; i < register_info->num_registers; i++) {
+		controller->irq_register_arr[i].index = i;
+		controller->irq_register_arr[i].mask_reg_offset =
+			register_info->irq_reg_set[i].mask_reg_offset;
+		controller->irq_register_arr[i].clear_reg_offset =
+			register_info->irq_reg_set[i].clear_reg_offset;
+		controller->irq_register_arr[i].status_reg_offset =
+			register_info->irq_reg_set[i].status_reg_offset;
+		CAM_DBG(CAM_IRQ_CTRL, "i %d mask_reg_offset: 0x%x", i,
+			controller->irq_register_arr[i].mask_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "i %d clear_reg_offset: 0x%x", i,
+			controller->irq_register_arr[i].clear_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "i %d status_reg_offset: 0x%x", i,
+			controller->irq_register_arr[i].status_reg_offset);
+	}
+	controller->num_registers        = register_info->num_registers;
+	controller->global_clear_bitmask = register_info->global_clear_bitmask;
+	controller->global_clear_offset  = register_info->global_clear_offset;
+	controller->mem_base             = mem_base;
+
+	CAM_DBG(CAM_IRQ_CTRL, "global_clear_bitmask: 0x%x",
+		controller->global_clear_bitmask);
+	CAM_DBG(CAM_IRQ_CTRL, "global_clear_offset: 0x%x",
+		controller->global_clear_offset);
+	CAM_DBG(CAM_IRQ_CTRL, "mem_base: %pK",
+		(void __iomem *)controller->mem_base);
+
+	INIT_LIST_HEAD(&controller->evt_handler_list_head);
+	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++)
+		INIT_LIST_HEAD(&controller->th_list_head[i]);
+
+	spin_lock_init(&controller->lock);
+
+	controller->hdl_idx = 1;
+	*irq_controller = controller;
+
+	return rc;
+
+evt_mask_alloc_error:
+	kfree(controller->irq_status_arr);
+status_alloc_error:
+	kfree(controller->irq_register_arr);
+reg_alloc_error:
+	kfree(controller);
+
+	return rc;
+}
+
+int cam_irq_controller_subscribe_irq(void *irq_controller,
+	enum cam_irq_priority_level        priority,
+	uint32_t                          *evt_bit_mask_arr,
+	void                              *handler_priv,
+	CAM_IRQ_HANDLER_TOP_HALF           top_half_handler,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler,
+	void                              *bottom_half,
+	struct cam_irq_bh_api             *irq_bh_api)
+{
+	struct cam_irq_controller  *controller  = irq_controller;
+	struct cam_irq_evt_handler *evt_handler = NULL;
+	int                         i;
+	int                         rc = 0;
+	uint32_t                    irq_mask;
+	unsigned long               flags = 0;
+	bool                        need_lock;
+
+	if (!controller || !handler_priv || !evt_bit_mask_arr) {
+		CAM_ERR(CAM_IRQ_CTRL,
+			"Inval params: ctlr=%pK hdl_priv=%pK bit_mask_arr=%pK",
+			controller, handler_priv, evt_bit_mask_arr);
+		return -EINVAL;
+	}
+
+	if (!top_half_handler) {
+		CAM_ERR(CAM_IRQ_CTRL, "Missing top half handler");
+		return -EINVAL;
+	}
+
+	if (bottom_half_handler &&
+		(!bottom_half || !irq_bh_api)) {
+		CAM_ERR(CAM_IRQ_CTRL,
+			"Invalid params: bh_handler=%pK bh=%pK bh_enq_f=%pK",
+			bottom_half_handler,
+			bottom_half,
+			irq_bh_api);
+		return -EINVAL;
+	}
+
+	if (irq_bh_api &&
+		(!irq_bh_api->bottom_half_enqueue_func ||
+		!irq_bh_api->get_bh_payload_func ||
+		!irq_bh_api->put_bh_payload_func)) {
+		CAM_ERR(CAM_IRQ_CTRL,
+			"Invalid: enqueue_func=%pK get_bh=%pK put_bh=%pK",
+			irq_bh_api->bottom_half_enqueue_func,
+			irq_bh_api->get_bh_payload_func,
+			irq_bh_api->put_bh_payload_func);
+		return -EINVAL;
+	}
+
+	if (priority >= CAM_IRQ_PRIORITY_MAX) {
+		CAM_ERR(CAM_IRQ_CTRL, "Invalid priority=%u, max=%u", priority,
+			CAM_IRQ_PRIORITY_MAX);
+		return -EINVAL;
+	}
+
+	evt_handler = kzalloc(sizeof(struct cam_irq_evt_handler), GFP_KERNEL);
+	if (!evt_handler) {
+		CAM_DBG(CAM_IRQ_CTRL, "Error allocating hlist_node");
+		return -ENOMEM;
+	}
+
+	evt_handler->evt_bit_mask_arr = kzalloc(sizeof(uint32_t) *
+		controller->num_registers, GFP_KERNEL);
+	if (!evt_handler->evt_bit_mask_arr) {
+		CAM_DBG(CAM_IRQ_CTRL, "Error allocating hlist_node");
+		rc = -ENOMEM;
+		goto free_evt_handler;
+	}
+
+	INIT_LIST_HEAD(&evt_handler->list_node);
+	INIT_LIST_HEAD(&evt_handler->th_list_node);
+
+	for (i = 0; i < controller->num_registers; i++)
+		evt_handler->evt_bit_mask_arr[i] = evt_bit_mask_arr[i];
+
+	evt_handler->priority                 = priority;
+	evt_handler->handler_priv             = handler_priv;
+	evt_handler->top_half_handler         = top_half_handler;
+	evt_handler->bottom_half_handler      = bottom_half_handler;
+	evt_handler->bottom_half              = bottom_half;
+	evt_handler->index                    = controller->hdl_idx++;
+
+	if (irq_bh_api)
+		evt_handler->irq_bh_api       = *irq_bh_api;
+
+	/* Avoid rollover to negative values */
+	if (controller->hdl_idx > 0x3FFFFFFF)
+		controller->hdl_idx = 1;
+
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+	for (i = 0; i < controller->num_registers; i++) {
+		controller->irq_register_arr[i].top_half_enable_mask[priority]
+			|= evt_bit_mask_arr[i];
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+		irq_mask |= evt_bit_mask_arr[i];
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+	}
+
+	list_add_tail(&evt_handler->list_node,
+		&controller->evt_handler_list_head);
+	list_add_tail(&evt_handler->th_list_node,
+		&controller->th_list_head[priority]);
+
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return evt_handler->index;
+
+free_evt_handler:
+	kfree(evt_handler);
+	evt_handler = NULL;
+
+	return rc;
+}
+
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller   *controller  = irq_controller;
+	struct cam_irq_evt_handler  *evt_handler = NULL;
+	struct cam_irq_evt_handler  *evt_handler_temp;
+	struct cam_irq_register_obj *irq_register = NULL;
+	enum cam_irq_priority_level priority;
+	unsigned long               flags = 0;
+	unsigned int                i;
+	uint32_t                    irq_mask;
+	uint32_t                    found = 0;
+	int                         rc = -EINVAL;
+	bool                        need_lock;
+
+	if (!controller)
+		return rc;
+
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_IRQ_CTRL, "enable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found) {
+		if (need_lock)
+			spin_unlock_irqrestore(&controller->lock, flags);
+		return rc;
+	}
+
+	priority = evt_handler->priority;
+	for (i = 0; i < controller->num_registers; i++) {
+		irq_register = &controller->irq_register_arr[i];
+		irq_register->top_half_enable_mask[priority] |=
+			evt_handler->evt_bit_mask_arr[i];
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			irq_register->mask_reg_offset);
+		irq_mask |= evt_handler->evt_bit_mask_arr[i];
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			irq_register->mask_reg_offset);
+	}
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return rc;
+}
+
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle)
+{
+	struct cam_irq_controller   *controller  = irq_controller;
+	struct cam_irq_evt_handler  *evt_handler = NULL;
+	struct cam_irq_evt_handler  *evt_handler_temp;
+	struct cam_irq_register_obj *irq_register;
+	enum cam_irq_priority_level priority;
+	unsigned long               flags = 0;
+	unsigned int                i;
+	uint32_t                    irq_mask;
+	uint32_t                    found = 0;
+	int                         rc = -EINVAL;
+	bool                        need_lock;
+
+	if (!controller)
+		return rc;
+
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_IRQ_CTRL, "disable item %d", handle);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (!found) {
+		if (need_lock)
+			spin_unlock_irqrestore(&controller->lock, flags);
+		return rc;
+	}
+
+	priority = evt_handler->priority;
+	for (i = 0; i < controller->num_registers; i++) {
+		irq_register = &controller->irq_register_arr[i];
+		irq_register->top_half_enable_mask[priority] &=
+			~(evt_handler->evt_bit_mask_arr[i]);
+
+		irq_mask = cam_io_r_mb(controller->mem_base +
+			irq_register->mask_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "irq_mask 0x%x before disable 0x%x",
+			irq_register->mask_reg_offset, irq_mask);
+		irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
+
+		cam_io_w_mb(irq_mask, controller->mem_base +
+			irq_register->mask_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "irq_mask 0x%x after disable 0x%x",
+			irq_register->mask_reg_offset, irq_mask);
+
+		/* Clear the IRQ bits of this handler */
+		cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+			controller->mem_base +
+			irq_register->clear_reg_offset);
+
+		if (controller->global_clear_offset)
+			cam_io_w_mb(
+				controller->global_clear_bitmask,
+				controller->mem_base +
+				controller->global_clear_offset);
+	}
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return rc;
+}
+
+int cam_irq_controller_unsubscribe_irq(void *irq_controller,
+	uint32_t handle)
+{
+	struct cam_irq_controller   *controller  = irq_controller;
+	struct cam_irq_evt_handler  *evt_handler = NULL;
+	struct cam_irq_evt_handler  *evt_handler_temp;
+	struct cam_irq_register_obj *irq_register;
+	enum cam_irq_priority_level priority;
+	uint32_t                    i;
+	uint32_t                    found = 0;
+	uint32_t                    irq_mask;
+	unsigned long               flags = 0;
+	int                         rc = -EINVAL;
+	bool                        need_lock;
+
+	if (!controller)
+		return rc;
+
+	need_lock = !in_irq();
+	if (need_lock)
+		spin_lock_irqsave(&controller->lock, flags);
+
+	list_for_each_entry_safe(evt_handler, evt_handler_temp,
+		&controller->evt_handler_list_head, list_node) {
+		if (evt_handler->index == handle) {
+			CAM_DBG(CAM_IRQ_CTRL, "unsubscribe item %d", handle);
+			list_del_init(&evt_handler->list_node);
+			list_del_init(&evt_handler->th_list_node);
+			found = 1;
+			rc = 0;
+			break;
+		}
+	}
+
+	if (found) {
+		priority = evt_handler->priority;
+		for (i = 0; i < controller->num_registers; i++) {
+			irq_register = &controller->irq_register_arr[i];
+			irq_register->top_half_enable_mask[priority] &=
+				~(evt_handler->evt_bit_mask_arr[i]);
+
+			irq_mask = cam_io_r_mb(controller->mem_base +
+				irq_register->mask_reg_offset);
+			irq_mask &= ~(evt_handler->evt_bit_mask_arr[i]);
+
+			cam_io_w_mb(irq_mask, controller->mem_base +
+				irq_register->mask_reg_offset);
+
+			/* Clear the IRQ bits of this handler */
+			cam_io_w_mb(evt_handler->evt_bit_mask_arr[i],
+				controller->mem_base +
+				irq_register->clear_reg_offset);
+			if (controller->global_clear_offset)
+				cam_io_w_mb(
+					controller->global_clear_bitmask,
+					controller->mem_base +
+					controller->global_clear_offset);
+		}
+
+		kfree(evt_handler->evt_bit_mask_arr);
+		kfree(evt_handler);
+	}
+
+	if (need_lock)
+		spin_unlock_irqrestore(&controller->lock, flags);
+
+	return rc;
+}
+
+/**
+ * cam_irq_controller_match_bit_mask()
+ *
+ * @Brief:                This function checks if any of the enabled IRQ bits
+ *                        for a certain handler are set in the Status values
+ *                        of the controller.
+ *
+ * @controller:           IRQ Controller structure
+ * @evt_handler:          Event handler structure
+ *
+ * @Return:               True: If any interested IRQ Bit is Set
+ *                        False: Otherwise
+ */
+static bool cam_irq_controller_match_bit_mask(
+	struct cam_irq_controller   *controller,
+	struct cam_irq_evt_handler  *evt_handler)
+{
+	int i;
+
+	for (i = 0; i < controller->num_registers; i++) {
+		if (evt_handler->evt_bit_mask_arr[i] &
+			controller->irq_status_arr[i])
+			return true;
+	}
+
+	return false;
+}
+
+static void cam_irq_controller_th_processing(
+	struct cam_irq_controller      *controller,
+	struct list_head               *th_list_head)
+{
+	struct cam_irq_evt_handler     *evt_handler = NULL;
+	struct cam_irq_th_payload      *th_payload = &controller->th_payload;
+	bool                            is_irq_match;
+	int                             rc = -EINVAL;
+	int                             i;
+	void                           *bh_cmd = NULL;
+	struct cam_irq_bh_api          *irq_bh_api = NULL;
+
+	CAM_DBG(CAM_IRQ_CTRL, "Enter");
+
+	if (list_empty(th_list_head))
+		return;
+
+	list_for_each_entry(evt_handler, th_list_head, th_list_node) {
+		is_irq_match = cam_irq_controller_match_bit_mask(controller,
+			evt_handler);
+
+		if (!is_irq_match)
+			continue;
+
+		CAM_DBG(CAM_IRQ_CTRL, "match found");
+
+		cam_irq_th_payload_init(th_payload);
+		th_payload->handler_priv  = evt_handler->handler_priv;
+		th_payload->num_registers = controller->num_registers;
+		for (i = 0; i < controller->num_registers; i++) {
+			th_payload->evt_status_arr[i] =
+				controller->irq_status_arr[i] &
+				evt_handler->evt_bit_mask_arr[i];
+		}
+
+		irq_bh_api = &evt_handler->irq_bh_api;
+		bh_cmd = NULL;
+
+		if (evt_handler->bottom_half_handler) {
+			rc = irq_bh_api->get_bh_payload_func(
+				evt_handler->bottom_half, &bh_cmd);
+			if (rc || !bh_cmd) {
+				CAM_ERR_RATE_LIMIT(CAM_IRQ_CTRL,
+					"Can't get bh payload");
+				continue;
+			}
+		}
+
+		/*
+		 * irq_status_arr[0] is a dummy argument; the entire
+		 * status array is passed in th_payload.
+		 */
+		if (evt_handler->top_half_handler)
+			rc = evt_handler->top_half_handler(
+				controller->irq_status_arr[0],
+				(void *)th_payload);
+
+		if (rc && bh_cmd) {
+			irq_bh_api->put_bh_payload_func(
+				evt_handler->bottom_half, &bh_cmd);
+			continue;
+		}
+
+		if (evt_handler->bottom_half_handler) {
+			CAM_DBG(CAM_IRQ_CTRL, "Enqueuing bottom half for %s",
+				controller->name);
+			irq_bh_api->bottom_half_enqueue_func(
+				evt_handler->bottom_half,
+				bh_cmd,
+				evt_handler->handler_priv,
+				th_payload->evt_payload_priv,
+				evt_handler->bottom_half_handler);
+		}
+	}
+
+	CAM_DBG(CAM_IRQ_CTRL, "Exit");
+}
+
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv)
+{
+	struct cam_irq_controller  *controller  = priv;
+	uint32_t i = 0;
+
+	if (!controller)
+		return IRQ_NONE;
+
+	for (i = 0; i < controller->num_registers; i++) {
+		cam_io_w_mb(0x0, controller->mem_base +
+			controller->irq_register_arr[i].clear_reg_offset);
+	}
+
+	if (controller->global_clear_offset)
+		cam_io_w_mb(controller->global_clear_bitmask,
+			controller->mem_base +
+			controller->global_clear_offset);
+
+	for (i = 0; i < controller->num_registers; i++) {
+		cam_io_w_mb(0x0, controller->mem_base +
+			controller->irq_register_arr[i].mask_reg_offset);
+	}
+
+	return IRQ_HANDLED;
+}
+
+irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv)
+{
+	struct cam_irq_controller   *controller  = priv;
+	struct cam_irq_register_obj *irq_register;
+	bool         need_th_processing[CAM_IRQ_PRIORITY_MAX] = {false};
+	int          i;
+	int          j;
+
+	if (!controller)
+		return IRQ_NONE;
+
+	CAM_DBG(CAM_IRQ_CTRL, "locking controller %pK name %s lock %pK",
+		controller, controller->name, &controller->lock);
+	spin_lock(&controller->lock);
+	for (i = 0; i < controller->num_registers; i++) {
+		irq_register = &controller->irq_register_arr[i];
+		controller->irq_status_arr[i] = cam_io_r_mb(
+			controller->mem_base +
+			irq_register->status_reg_offset);
+		cam_io_w_mb(controller->irq_status_arr[i],
+			controller->mem_base +
+			irq_register->clear_reg_offset);
+		CAM_DBG(CAM_IRQ_CTRL, "Read irq status%d (0x%x) = 0x%x", i,
+			irq_register->status_reg_offset,
+			controller->irq_status_arr[i]);
+		for (j = 0; j < CAM_IRQ_PRIORITY_MAX; j++) {
+			if (irq_register->top_half_enable_mask[j] &
+				controller->irq_status_arr[i]) {
+				need_th_processing[j] = true;
+				CAM_DBG(CAM_IRQ_CTRL,
+					"i %d j %d need_th_processing = %d",
+					i, j, need_th_processing[j]);
+			}
+		}
+	}
+
+	CAM_DBG(CAM_IRQ_CTRL, "Status Registers read Successful");
+
+	if (controller->global_clear_offset)
+		cam_io_w_mb(controller->global_clear_bitmask,
+			controller->mem_base + controller->global_clear_offset);
+
+	CAM_DBG(CAM_IRQ_CTRL, "Status Clear done");
+
+	for (i = 0; i < CAM_IRQ_PRIORITY_MAX; i++) {
+		if (need_th_processing[i]) {
+			CAM_DBG(CAM_IRQ_CTRL, "Invoke TH processing");
+			cam_irq_controller_th_processing(controller,
+				&controller->th_list_head[i]);
+		}
+	}
+	spin_unlock(&controller->lock);
+	CAM_DBG(CAM_IRQ_CTRL, "unlocked controller %pK name %s lock %pK",
+		controller, controller->name, &controller->lock);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
new file mode 100644
index 0000000..4c06578
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller/cam_irq_controller.h
@@ -0,0 +1,272 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IRQ_CONTROLLER_H_
+#define _CAM_IRQ_CONTROLLER_H_
+
+#include <linux/interrupt.h>
+
+#define CAM_IRQ_BITS_PER_REGISTER      32
+
+/*
+ * enum cam_irq_priority_level:
+ * @Brief:                  Priority levels for IRQ events.
+ *                          Priority_0 events will be serviced before
+ *                          Priority_1 if both bits are set in the same
+ *                          Status Read, and so on up to Priority_4.
+ *
+ *                          Default Priority is Priority_4.
+ */
+enum cam_irq_priority_level {
+	CAM_IRQ_PRIORITY_0,
+	CAM_IRQ_PRIORITY_1,
+	CAM_IRQ_PRIORITY_2,
+	CAM_IRQ_PRIORITY_3,
+	CAM_IRQ_PRIORITY_4,
+	CAM_IRQ_PRIORITY_MAX,
+};
+
+/*
+ * struct cam_irq_register_set:
+ * @Brief:                  Structure containing offsets of IRQ related
+ *                          registers belonging to a Set
+ *
+ * @mask_reg_offset:        Offset of IRQ MASK register
+ * @clear_reg_offset:       Offset of IRQ CLEAR register
+ * @status_reg_offset:      Offset of IRQ STATUS register
+ */
+struct cam_irq_register_set {
+	uint32_t                       mask_reg_offset;
+	uint32_t                       clear_reg_offset;
+	uint32_t                       status_reg_offset;
+};
+
+/*
+ * struct cam_irq_controller_reg_info:
+ * @Brief:                  Structure describing the IRQ registers
+ *
+ * @num_registers:          Number of sets (mask/clear/status) of IRQ registers
+ * @irq_reg_set:            Array of Register Set Offsets.
+ *                          Length of array = num_registers
+ * @global_clear_offset:    Offset of Global IRQ Clear register. This register
+ *                          contains the BIT that needs to be set for the CLEAR
+ *                          to take effect
+ * @global_clear_bitmask:   Bitmask needed to be used in Global Clear register
+ *                          for Clear IRQ cmd to take effect
+ */
+struct cam_irq_controller_reg_info {
+	uint32_t                      num_registers;
+	struct cam_irq_register_set  *irq_reg_set;
+	uint32_t                      global_clear_offset;
+	uint32_t                      global_clear_bitmask;
+};
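+
+/*
+ * Illustrative sketch (not part of this change): a hypothetical block with
+ * two mask/clear/status register sets could describe itself as below. All
+ * names and offsets here are made up for illustration only.
+ *
+ * static struct cam_irq_register_set example_irq_reg_set[2] = {
+ *	{
+ *		.mask_reg_offset   = 0x10,
+ *		.clear_reg_offset  = 0x14,
+ *		.status_reg_offset = 0x18,
+ *	},
+ *	{
+ *		.mask_reg_offset   = 0x20,
+ *		.clear_reg_offset  = 0x24,
+ *		.status_reg_offset = 0x28,
+ *	},
+ * };
+ *
+ * static struct cam_irq_controller_reg_info example_reg_info = {
+ *	.num_registers        = 2,
+ *	.irq_reg_set          = example_irq_reg_set,
+ *	.global_clear_offset  = 0x08,
+ *	.global_clear_bitmask = 0x1,
+ * };
+ */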
+
+/*
+ * struct cam_irq_th_payload:
+ * @Brief:                  Event payload structure. This structure will be
+ *                          passed to the Top Half handler functions.
+ *
+ * @handler_priv:           Private Data of handling object set when
+ *                          subscribing to IRQ event.
+ * @num_registers:          Length of evt_status_arr Array below
+ * @evt_status_arr:         Array of Status bitmask read from registers.
+ *                          Length of array = num_registers
+ * @evt_payload_priv:       Private payload pointer which can be set by Top
+ *                          Half handler for use in Bottom Half.
+ */
+struct cam_irq_th_payload {
+	void       *handler_priv;
+	uint32_t    num_registers;
+	uint32_t   *evt_status_arr;
+	void       *evt_payload_priv;
+};
+
+/*
+ * cam_irq_th_payload_init()
+ *
+ * @brief:              Initialize the top half payload structure
+ *
+ * @th_payload:         Top Half payload structure to Initialize
+ *
+ * @return:             Void
+ */
+static inline void cam_irq_th_payload_init(
+	struct cam_irq_th_payload *th_payload)
+{
+	th_payload->handler_priv = NULL;
+	th_payload->evt_payload_priv = NULL;
+}
+
+typedef int (*CAM_IRQ_HANDLER_TOP_HALF)(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload);
+
+typedef int (*CAM_IRQ_HANDLER_BOTTOM_HALF)(void *handler_priv,
+	void *evt_payload_priv);
+
+typedef void (*CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC)(void *bottom_half,
+	void *bh_cmd, void *handler_priv, void *evt_payload_priv,
+	CAM_IRQ_HANDLER_BOTTOM_HALF);
+
+typedef int (*CAM_IRQ_GET_TASKLET_PAYLOAD_FUNC)(void *bottom_half,
+	void **bh_cmd);
+
+typedef void (*CAM_IRQ_PUT_TASKLET_PAYLOAD_FUNC)(void *bottom_half,
+	void **bh_cmd);
+
+struct cam_irq_bh_api {
+	CAM_IRQ_BOTTOM_HALF_ENQUEUE_FUNC bottom_half_enqueue_func;
+	CAM_IRQ_GET_TASKLET_PAYLOAD_FUNC get_bh_payload_func;
+	CAM_IRQ_PUT_TASKLET_PAYLOAD_FUNC put_bh_payload_func;
+};
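+
+/*
+ * Illustrative sketch (not part of this change): a minimal top half handler
+ * matching the CAM_IRQ_HANDLER_TOP_HALF signature. The name is hypothetical;
+ * a real handler would typically latch th_payload->evt_status_arr into a
+ * bottom half payload via th_payload->evt_payload_priv.
+ *
+ * static int example_top_half(uint32_t evt_id,
+ *	struct cam_irq_th_payload *th_payload)
+ * {
+ *	uint32_t i;
+ *
+ *	for (i = 0; i < th_payload->num_registers; i++)
+ *		pr_debug("status[%u] = 0x%x\n", i,
+ *			th_payload->evt_status_arr[i]);
+ *
+ *	return 0;
+ * }
+ */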
+
+/*
+ * cam_irq_controller_init()
+ *
+ * @brief:              Create and Initialize IRQ Controller.
+ *
+ * @name:               Name of IRQ Controller block
+ * @mem_base:           Mapped base address of register space to which
+ *                      register offsets are added to access registers
+ * @register_info:      Register Info structure associated with this Controller
+ * @irq_controller:     Pointer to IRQ Controller that will be filled if
+ *                      initialization is successful
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_irq_controller_init(const char       *name,
+	void __iomem                         *mem_base,
+	struct cam_irq_controller_reg_info   *register_info,
+	void                                **irq_controller);
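+
+/*
+ * Illustrative sketch (not part of this change), assuming the hypothetical
+ * example_reg_info above and an already mapped mem_base:
+ *
+ *	void *example_ctrl = NULL;
+ *	int rc;
+ *
+ *	rc = cam_irq_controller_init("example_blk", mem_base,
+ *		&example_reg_info, &example_ctrl);
+ *	if (rc)
+ *		return rc;
+ */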
+
+/*
+ * cam_irq_controller_subscribe_irq()
+ *
+ * @brief:               Subscribe to certain IRQ events.
+ *
+ * @irq_controller:      Pointer to IRQ Controller that controls this event IRQ
+ * @priority:            Priority level of these events used if multiple events
+ *                       are SET in the Status Register
+ * @evt_bit_mask_arr:    evt_bit_mask that has the bits set for IRQs to
+ *                       subscribe for
+ * @handler_priv:        Private data that will be passed to the Top/Bottom Half
+ *                       handler function
+ * @top_half_handler:    Top half Handler callback function
+ * @bottom_half_handler: Bottom half Handler callback function
+ * @bottom_half:         Pointer to bottom_half implementation on which to
+ *                       enqueue the event for further handling
+ * @irq_bh_api:          Bottom half APIs (enqueue function and payload
+ *                       get/put functions) used to hand off the event
+ *
+ * @return:              Positive: Success. Value represents handle which is
+ *                                 to be used to unsubscribe
+ *                       Negative: Failure
+ */
+int cam_irq_controller_subscribe_irq(void *irq_controller,
+	enum cam_irq_priority_level        priority,
+	uint32_t                          *evt_bit_mask_arr,
+	void                              *handler_priv,
+	CAM_IRQ_HANDLER_TOP_HALF           top_half_handler,
+	CAM_IRQ_HANDLER_BOTTOM_HALF        bottom_half_handler,
+	void                              *bottom_half,
+	struct cam_irq_bh_api             *irq_bh_api);
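+
+/*
+ * Illustrative sketch (not part of this change): subscribing for one bit in
+ * each of the two hypothetical register sets above, top half only. With no
+ * bottom half handler, bottom_half and irq_bh_api may be NULL.
+ *
+ *	uint32_t example_mask[2] = { BIT(0), BIT(3) };
+ *	int handle;
+ *
+ *	handle = cam_irq_controller_subscribe_irq(example_ctrl,
+ *		CAM_IRQ_PRIORITY_0, example_mask, priv_data,
+ *		example_top_half, NULL, NULL, NULL);
+ *	if (handle < 0)
+ *		return handle;
+ */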
+
+/*
+ * cam_irq_controller_unsubscribe_irq()
+ *
+ * @brief:               Unsubscribe to IRQ events previously subscribed to.
+ *
+ * @irq_controller:      Pointer to IRQ Controller that controls this event IRQ
+ * @handle:              Handle returned on successful subscribe used to
+ *                       identify the handler object
+ *
+ * @return:              0: Success
+ *                       Negative: Failure
+ */
+int cam_irq_controller_unsubscribe_irq(void *irq_controller,
+	uint32_t handle);
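+
+/*
+ * Illustrative sketch (not part of this change): releasing the handle
+ * returned by cam_irq_controller_subscribe_irq():
+ *
+ *	rc = cam_irq_controller_unsubscribe_irq(example_ctrl, handle);
+ */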
+
+/*
+ * cam_irq_controller_deinit()
+ *
+ * @brief:              Deinitialize IRQ Controller.
+ *
+ * @irq_controller:     Pointer to IRQ Controller that needs to be
+ *                      deinitialized
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_irq_controller_deinit(void **irq_controller);
+
+/*
+ * cam_irq_controller_handle_irq()
+ *
+ * @brief:              Function that should be registered with the IRQ line.
+ *                      This is the first function to be called when the IRQ
+ *                      is fired. It will read the Status register and Clear
+ *                      the IRQ bits. It will then call the top_half handlers
+ *                      and enqueue the result to bottom_half.
+ *
+ * @irq_num:            Number of IRQ line that was set that led to this
+ *                      function being called
+ * @priv:               Private data registered with request_irq is passed back
+ *                      here. This private data should be the irq_controller
+ *                      structure.
+ *
+ * @return:             IRQ_HANDLED/IRQ_NONE
+ */
+irqreturn_t cam_irq_controller_handle_irq(int irq_num, void *priv);
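+
+/*
+ * Illustrative sketch (not part of this change): registering the controller
+ * with an IRQ line, where irq_num is a hypothetical line number obtained
+ * from platform resources:
+ *
+ *	rc = request_irq(irq_num, cam_irq_controller_handle_irq, 0,
+ *		"example_blk", example_ctrl);
+ */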
+
+/*
+ * cam_irq_controller_disable_irq()
+ *
+ * @brief:              Disable the interrupts on given controller.
+ *                      Unsubscribe will disable the IRQ by default, so this
+ *                      is only needed if the IRQ must be disabled again
+ *                      between subscribe and unsubscribe
+ *
+ * @irq_controller:     Pointer to IRQ Controller with which the events were
+ *                      registered.
+ * @handle:             Handle returned on successful subscribe, used to
+ *                      identify the handler object
+ *
+ * @return:             0: events found and disabled
+ *                      Negative: events not registered on this controller
+ */
+int cam_irq_controller_disable_irq(void *irq_controller, uint32_t handle);
+
+/*
+ * cam_irq_controller_enable_irq()
+ *
+ * @brief:              Enable the interrupts on given controller.
+ *                      Subscribe will enable the IRQ by default, so this
+ *                      is only needed if the IRQ must be enabled again
+ *                      between subscribe and unsubscribe
+ *
+ * @irq_controller:     Pointer to IRQ Controller with which the events were
+ *                      registered.
+ * @handle:             Handle returned on successful subscribe, used to
+ *                      identify the handler object
+ *
+ * @return:             0: events found and enabled
+ *                      Negative: events not registered on this controller
+ */
+int cam_irq_controller_enable_irq(void *irq_controller, uint32_t handle);
+
+/*
+ * cam_irq_controller_clear_and_mask()
+ *
+ * @brief:              This function clears and masks all the irq bits
+ *
+ * @irq_num:            Number of IRQ line that was set that led to this
+ *                      function being called
+ * @priv:               Private data registered with request_irq is passed back
+ *                      here. This private data should be the irq_controller
+ *                      structure.
+ *
+ * @return:             IRQ_HANDLED/IRQ_NONE
+ */
+irqreturn_t cam_irq_controller_clear_and_mask(int irq_num, void *priv);
+#endif /* _CAM_IRQ_CONTROLLER_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
new file mode 100644
index 0000000..23f094b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include/cam_isp_hw_mgr_intf.h
@@ -0,0 +1,226 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ISP_HW_MGR_INTF_H_
+#define _CAM_ISP_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_hw_mgr_intf.h"
+
+/* MAX IFE instance */
+#define CAM_IFE_HW_NUM_MAX   4
+#define CAM_IFE_RDI_NUM_MAX  4
+
+/**
+ *  enum cam_isp_hw_event_type - Collection of the ISP hardware events
+ */
+enum cam_isp_hw_event_type {
+	CAM_ISP_HW_EVENT_ERROR,
+	CAM_ISP_HW_EVENT_SOF,
+	CAM_ISP_HW_EVENT_REG_UPDATE,
+	CAM_ISP_HW_EVENT_EPOCH,
+	CAM_ISP_HW_EVENT_EOF,
+	CAM_ISP_HW_EVENT_DONE,
+	CAM_ISP_HW_EVENT_MAX
+};
+
+/**
+ * enum cam_isp_hw_err_type - Collection of the ISP error types for
+ *                         ISP hardware event CAM_ISP_HW_EVENT_ERROR
+ */
+enum cam_isp_hw_err_type {
+	CAM_ISP_HW_ERROR_NONE,
+	CAM_ISP_HW_ERROR_OVERFLOW,
+	CAM_ISP_HW_ERROR_P2I_ERROR,
+	CAM_ISP_HW_ERROR_VIOLATION,
+	CAM_ISP_HW_ERROR_BUSIF_OVERFLOW,
+	CAM_ISP_HW_ERROR_MAX,
+};
+
+/**
+ *  enum cam_isp_hw_stop_cmd - Specify the stop command type
+ */
+enum cam_isp_hw_stop_cmd {
+	CAM_ISP_HW_STOP_AT_FRAME_BOUNDARY,
+	CAM_ISP_HW_STOP_IMMEDIATELY,
+	CAM_ISP_HW_STOP_MAX,
+};
+
+/**
+ * struct cam_isp_stop_args - hardware stop arguments
+ *
+ * @hw_stop_cmd:               Hardware stop command type information
+ * @stop_only:                 Send stop only to hw drivers. No deinit to be
+ *                             done.
+ *
+ */
+struct cam_isp_stop_args {
+	enum cam_isp_hw_stop_cmd      hw_stop_cmd;
+	bool                          stop_only;
+};
+
+/**
+ * struct cam_isp_start_args - isp hardware start arguments
+ *
+ * @hw_config:                 Hardware configuration commands.
+ * @start_only:                Send start only to hw drivers. No init to
+ *                             be done.
+ *
+ */
+struct cam_isp_start_args {
+	struct cam_hw_config_args     hw_config;
+	bool                          start_only;
+};
+
+/**
+ * struct cam_isp_bw_config_internal - Internal Bandwidth configuration
+ *
+ * @usage_type:                 Usage type (Single/Dual)
+ * @num_rdi:                    Number of RDI votes
+ * @left_pix_vote:              Bandwidth vote for left ISP
+ * @right_pix_vote:             Bandwidth vote for right ISP
+ * @rdi_vote:                   RDI bandwidth requirements
+ */
+struct cam_isp_bw_config_internal {
+	uint32_t                       usage_type;
+	uint32_t                       num_rdi;
+	struct cam_isp_bw_vote         left_pix_vote;
+	struct cam_isp_bw_vote         right_pix_vote;
+	struct cam_isp_bw_vote         rdi_vote[CAM_IFE_RDI_NUM_MAX];
+};
+
+/**
+ * struct cam_isp_prepare_hw_update_data - hw prepare data
+ *
+ * @packet_opcode_type:     Opcode from the packet header; identifies
+ *                          whether the packet is an init packet or an
+ *                          update packet
+ * @bw_config:              BW config information
+ * @bw_config_valid:        Flag indicating whether the bw_config at the index
+ *                          is valid or not
+ *
+ */
+struct cam_isp_prepare_hw_update_data {
+	uint32_t                          packet_opcode_type;
+	struct cam_isp_bw_config_internal bw_config[CAM_IFE_HW_NUM_MAX];
+	bool                              bw_config_valid[CAM_IFE_HW_NUM_MAX];
+};
+
+
+ * struct cam_isp_hw_sof_event_data - Event payload for CAM_HW_EVENT_SOF
+ *
+ * @timestamp:   Time stamp for the sof event
+ * @boot_time:   Boot time stamp for the sof event
+ *
+ */
+struct cam_isp_hw_sof_event_data {
+	uint64_t       timestamp;
+	uint64_t       boot_time;
+};
+
+/**
+ * struct cam_isp_hw_reg_update_event_data - Event payload for
+ *                         CAM_HW_EVENT_REG_UPDATE
+ *
+ * @timestamp:     Time stamp for the reg update event
+ *
+ */
+struct cam_isp_hw_reg_update_event_data {
+	uint64_t       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_epoch_event_data - Event payload for CAM_HW_EVENT_EPOCH
+ *
+ * @timestamp:     Time stamp for the epoch event
+ *
+ */
+struct cam_isp_hw_epoch_event_data {
+	uint64_t       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_done_event_data - Event payload for CAM_HW_EVENT_DONE
+ *
+ * @num_handles:           Number of resource handles
+ * @resource_handle:       Resource handle array
+ * @timestamp:             Timestamp for the buf done event
+ *
+ */
+struct cam_isp_hw_done_event_data {
+	uint32_t             num_handles;
+	uint32_t             resource_handle[CAM_NUM_OUT_PER_COMP_IRQ_MAX];
+	uint64_t             timestamp;
+};
+
+/**
+ * struct cam_isp_hw_eof_event_data - Event payload for CAM_HW_EVENT_EOF
+ *
+ * @timestamp:             Timestamp for the eof event
+ *
+ */
+struct cam_isp_hw_eof_event_data {
+	uint64_t       timestamp;
+};
+
+/**
+ * struct cam_isp_hw_error_event_data - Event payload for CAM_HW_EVENT_ERROR
+ *
+ * @error_type:            Error type for the error event
+ * @timestamp:             Timestamp for the error event
+ * @recovery_enabled:      Identifies if the context needs to recover & reapply
+ *                         this request
+ */
+struct cam_isp_hw_error_event_data {
+	uint32_t             error_type;
+	uint64_t             timestamp;
+	bool                 recovery_enabled;
+};
+
+/* enum cam_isp_hw_mgr_command - Hardware manager command type */
+enum cam_isp_hw_mgr_command {
+	CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT,
+	CAM_ISP_HW_MGR_CMD_PAUSE_HW,
+	CAM_ISP_HW_MGR_CMD_RESUME_HW,
+	CAM_ISP_HW_MGR_CMD_SOF_DEBUG,
+	CAM_ISP_HW_MGR_CMD_MAX,
+};
+
+/**
+ * struct cam_isp_hw_cmd_args - Payload for hw manager command
+ *
+ * @cmd_type:              HW command type
+ * @u:                     Command-specific argument (is_rdi_only_context
+ *                         or sof_irq_enable)
+ */
+struct cam_isp_hw_cmd_args {
+	uint32_t                              cmd_type;
+	union {
+		uint32_t                      is_rdi_only_context;
+		uint32_t                      sof_irq_enable;
+	} u;
+};
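+
+/*
+ * Illustrative sketch (not part of this change): issuing an RDI-only
+ * context query; the caller reads cmd_args.u.is_rdi_only_context back
+ * after the hw manager command returns.
+ *
+ *	struct cam_isp_hw_cmd_args cmd_args;
+ *
+ *	cmd_args.cmd_type = CAM_ISP_HW_MGR_CMD_IS_RDI_ONLY_CONTEXT;
+ */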
+
+/**
+ * cam_isp_hw_mgr_init()
+ *
+ * @brief:              Initialization function for the ISP hardware manager
+ *
+ * @of_node:            Device node input
+ * @hw_mgr:             Input/output structure for the ISP hardware manager
+ *                          initialization
+ * @iommu_hdl:          IOMMU handle to be returned
+ */
+int cam_isp_hw_mgr_init(struct device_node *of_node,
+	struct cam_hw_mgr_intf *hw_mgr, int *iommu_hdl);
+
+#endif /* _CAM_ISP_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
new file mode 100644
index 0000000..8b82314
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPECTRA_CAMERA) += ife_csid_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += vfe_hw/
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
new file mode 100644
index 0000000..8578a81
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid_dev.o cam_ife_csid_soc.o cam_ife_csid_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ife_csid17x.o cam_ife_csid_lite17x.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
new file mode 100644
index 0000000..e464eb5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid170.h
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_170_H_
+#define _CAM_IFE_CSID_170_H_
+
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_pxl_reg_offset  cam_ife_csid_170_ipp_reg_offset = {
+	.csid_pxl_irq_status_addr            = 0x30,
+	.csid_pxl_irq_mask_addr              = 0x34,
+	.csid_pxl_irq_clear_addr             = 0x38,
+	.csid_pxl_irq_set_addr               = 0x3c,
+
+	.csid_pxl_cfg0_addr                  = 0x200,
+	.csid_pxl_cfg1_addr                  = 0x204,
+	.csid_pxl_ctrl_addr                  = 0x208,
+	.csid_pxl_frm_drop_pattern_addr      = 0x20c,
+	.csid_pxl_frm_drop_period_addr       = 0x210,
+	.csid_pxl_irq_subsample_pattern_addr = 0x214,
+	.csid_pxl_irq_subsample_period_addr  = 0x218,
+	.csid_pxl_hcrop_addr                 = 0x21c,
+	.csid_pxl_vcrop_addr                 = 0x220,
+	.csid_pxl_pix_drop_pattern_addr      = 0x224,
+	.csid_pxl_pix_drop_period_addr       = 0x228,
+	.csid_pxl_line_drop_pattern_addr     = 0x22c,
+	.csid_pxl_line_drop_period_addr      = 0x230,
+	.csid_pxl_rst_strobes_addr           = 0x240,
+	.csid_pxl_status_addr                = 0x254,
+	.csid_pxl_misr_val_addr              = 0x258,
+	.csid_pxl_format_measure_cfg0_addr   = 0x270,
+	.csid_pxl_format_measure_cfg1_addr   = 0x274,
+	.csid_pxl_format_measure0_addr       = 0x278,
+	.csid_pxl_format_measure1_addr       = 0x27c,
+	.csid_pxl_format_measure2_addr       = 0x280,
+	.csid_pxl_timestamp_curr0_sof_addr   = 0x290,
+	.csid_pxl_timestamp_curr1_sof_addr   = 0x294,
+	.csid_pxl_timestamp_perv0_sof_addr   = 0x298,
+	.csid_pxl_timestamp_perv1_sof_addr   = 0x29c,
+	.csid_pxl_timestamp_curr0_eof_addr   = 0x2a0,
+	.csid_pxl_timestamp_curr1_eof_addr   = 0x2a4,
+	.csid_pxl_timestamp_perv0_eof_addr   = 0x2a8,
+	.csid_pxl_timestamp_perv1_eof_addr   = 0x2ac,
+	/* configurations */
+	.pix_store_en_shift_val              = 7,
+	.early_eof_en_shift_val              = 29,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_0_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_1_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_170_rdi_2_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+			cam_ife_csid_170_csi2_reg_offset = {
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/*CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_hdr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+	.csi2_capture_long_pkt_en_shift               = 0,
+	.csi2_capture_short_pkt_en_shift              = 1,
+	.csi2_capture_cphy_pkt_en_shift               = 2,
+	.csi2_capture_long_pkt_dt_shift               = 4,
+	.csi2_capture_long_pkt_vc_shift               = 10,
+	.csi2_capture_short_pkt_vc_shift              = 15,
+	.csi2_capture_cphy_pkt_dt_shift               = 20,
+	.csi2_capture_cphy_pkt_vc_shift               = 26,
+};
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+			cam_ife_csid_170_tpg_reg_offset = {
+	/*CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/* configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+	.tpg_cpas_ife_reg_offset                      = 0x28,
+};
+
+static struct cam_ife_csid_common_reg_offset
+			cam_ife_csid_170_cmn_reg_offset = {
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/*configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.num_rdis                                     = 3,
+	.num_pix                                      = 1,
+	.num_ppp                                      = 0,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+	.ppp_irq_mask_all                             = 0x0,
+	.measure_en_hbi_vbi_cnt_mask                  = 0xC,
+	.format_measure_en_val                        = 1,
+};
+
+static struct cam_ife_csid_reg_offset cam_ife_csid_170_reg_offset = {
+	.cmn_reg          = &cam_ife_csid_170_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_170_csi2_reg_offset,
+	.ipp_reg          = &cam_ife_csid_170_ipp_reg_offset,
+	.ppp_reg          = NULL,
+	.rdi_reg = {
+		&cam_ife_csid_170_rdi_0_reg_offset,
+		&cam_ife_csid_170_rdi_1_reg_offset,
+		&cam_ife_csid_170_rdi_2_reg_offset,
+		NULL,
+		},
+	.tpg_reg = &cam_ife_csid_170_tpg_reg_offset,
+};
+
+#endif /* _CAM_IFE_CSID_170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h
new file mode 100644
index 0000000..dae8c39
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid175.h
@@ -0,0 +1,344 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_175_H_
+#define _CAM_IFE_CSID_175_H_
+
+#include "cam_ife_csid_core.h"
+
+static struct cam_ife_csid_pxl_reg_offset  cam_ife_csid_175_ipp_reg_offset = {
+	.csid_pxl_irq_status_addr            = 0x30,
+	.csid_pxl_irq_mask_addr              = 0x34,
+	.csid_pxl_irq_clear_addr             = 0x38,
+	.csid_pxl_irq_set_addr               = 0x3c,
+
+	.csid_pxl_cfg0_addr                  = 0x200,
+	.csid_pxl_cfg1_addr                  = 0x204,
+	.csid_pxl_ctrl_addr                  = 0x208,
+	.csid_pxl_frm_drop_pattern_addr      = 0x20c,
+	.csid_pxl_frm_drop_period_addr       = 0x210,
+	.csid_pxl_irq_subsample_pattern_addr = 0x214,
+	.csid_pxl_irq_subsample_period_addr  = 0x218,
+	.csid_pxl_hcrop_addr                 = 0x21c,
+	.csid_pxl_vcrop_addr                 = 0x220,
+	.csid_pxl_pix_drop_pattern_addr      = 0x224,
+	.csid_pxl_pix_drop_period_addr       = 0x228,
+	.csid_pxl_line_drop_pattern_addr     = 0x22c,
+	.csid_pxl_line_drop_period_addr      = 0x230,
+	.csid_pxl_rst_strobes_addr           = 0x240,
+	.csid_pxl_status_addr                = 0x254,
+	.csid_pxl_misr_val_addr              = 0x258,
+	.csid_pxl_format_measure_cfg0_addr   = 0x270,
+	.csid_pxl_format_measure_cfg1_addr   = 0x274,
+	.csid_pxl_format_measure0_addr       = 0x278,
+	.csid_pxl_format_measure1_addr       = 0x27c,
+	.csid_pxl_format_measure2_addr       = 0x280,
+	.csid_pxl_timestamp_curr0_sof_addr   = 0x290,
+	.csid_pxl_timestamp_curr1_sof_addr   = 0x294,
+	.csid_pxl_timestamp_perv0_sof_addr   = 0x298,
+	.csid_pxl_timestamp_perv1_sof_addr   = 0x29c,
+	.csid_pxl_timestamp_curr0_eof_addr   = 0x2a0,
+	.csid_pxl_timestamp_curr1_eof_addr   = 0x2a4,
+	.csid_pxl_timestamp_perv0_eof_addr   = 0x2a8,
+	.csid_pxl_timestamp_perv1_eof_addr   = 0x2ac,
+	/* configurations */
+	.pix_store_en_shift_val              = 7,
+	.early_eof_en_shift_val              = 29,
+};
+
+static struct cam_ife_csid_pxl_reg_offset  cam_ife_csid_175_ppp_reg_offset = {
+	.csid_pxl_irq_status_addr            = 0xa0,
+	.csid_pxl_irq_mask_addr              = 0xa4,
+	.csid_pxl_irq_clear_addr             = 0xa8,
+	.csid_pxl_irq_set_addr               = 0xac,
+
+	.csid_pxl_cfg0_addr                  = 0x700,
+	.csid_pxl_cfg1_addr                  = 0x704,
+	.csid_pxl_ctrl_addr                  = 0x708,
+	.csid_pxl_frm_drop_pattern_addr      = 0x70c,
+	.csid_pxl_frm_drop_period_addr       = 0x710,
+	.csid_pxl_irq_subsample_pattern_addr = 0x714,
+	.csid_pxl_irq_subsample_period_addr  = 0x718,
+	.csid_pxl_hcrop_addr                 = 0x71c,
+	.csid_pxl_vcrop_addr                 = 0x720,
+	.csid_pxl_pix_drop_pattern_addr      = 0x724,
+	.csid_pxl_pix_drop_period_addr       = 0x728,
+	.csid_pxl_line_drop_pattern_addr     = 0x72c,
+	.csid_pxl_line_drop_period_addr      = 0x730,
+	.csid_pxl_rst_strobes_addr           = 0x740,
+	.csid_pxl_status_addr                = 0x754,
+	.csid_pxl_misr_val_addr              = 0x758,
+	.csid_pxl_format_measure_cfg0_addr   = 0x770,
+	.csid_pxl_format_measure_cfg1_addr   = 0x774,
+	.csid_pxl_format_measure0_addr       = 0x778,
+	.csid_pxl_format_measure1_addr       = 0x77c,
+	.csid_pxl_format_measure2_addr       = 0x780,
+	.csid_pxl_timestamp_curr0_sof_addr   = 0x790,
+	.csid_pxl_timestamp_curr1_sof_addr   = 0x794,
+	.csid_pxl_timestamp_perv0_sof_addr   = 0x798,
+	.csid_pxl_timestamp_perv1_sof_addr   = 0x79c,
+	.csid_pxl_timestamp_curr0_eof_addr   = 0x7a0,
+	.csid_pxl_timestamp_curr1_eof_addr   = 0x7a4,
+	.csid_pxl_timestamp_perv0_eof_addr   = 0x7a8,
+	.csid_pxl_timestamp_perv1_eof_addr   = 0x7ac,
+	/* configurations */
+	.pix_store_en_shift_val              = 7,
+	.early_eof_en_shift_val              = 29,
+};
+
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_175_rdi_0_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_175_rdi_1_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static struct cam_ife_csid_rdi_reg_offset cam_ife_csid_175_rdi_2_reg_offset = {
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static struct cam_ife_csid_csi2_rx_reg_offset
+			cam_ife_csid_175_csi2_reg_offset = {
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/* CSI2 Rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_hdr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+	.csi2_capture_long_pkt_en_shift               = 0,
+	.csi2_capture_short_pkt_en_shift              = 1,
+	.csi2_capture_cphy_pkt_en_shift               = 2,
+	.csi2_capture_long_pkt_dt_shift               = 4,
+	.csi2_capture_long_pkt_vc_shift               = 10,
+	.csi2_capture_short_pkt_vc_shift              = 15,
+	.csi2_capture_cphy_pkt_dt_shift               = 20,
+	.csi2_capture_cphy_pkt_vc_shift               = 26,
+};
+
+static struct cam_ife_csid_csi2_tpg_reg_offset
+			cam_ife_csid_175_tpg_reg_offset = {
+	/* CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/* configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+	.tpg_cpas_ife_reg_offset                      = 0x28,
+};
+
+static struct cam_ife_csid_common_reg_offset
+			cam_ife_csid_175_cmn_reg_offset = {
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/* configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.num_rdis                                     = 3,
+	.num_pix                                      = 1,
+	.num_ppp                                      = 1,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+	.ppp_irq_mask_all                             = 0xFFFF,
+	.measure_en_hbi_vbi_cnt_mask                  = 0xC,
+	.format_measure_en_val                        = 1,
+};
+
+static struct cam_ife_csid_reg_offset cam_ife_csid_175_reg_offset = {
+	.cmn_reg          = &cam_ife_csid_175_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_175_csi2_reg_offset,
+	.ipp_reg          = &cam_ife_csid_175_ipp_reg_offset,
+	.ppp_reg          = &cam_ife_csid_175_ppp_reg_offset,
+	.rdi_reg = {
+		&cam_ife_csid_175_rdi_0_reg_offset,
+		&cam_ife_csid_175_rdi_1_reg_offset,
+		&cam_ife_csid_175_rdi_2_reg_offset,
+		NULL,
+		},
+	.tpg_reg = &cam_ife_csid_175_tpg_reg_offset,
+};
+
+#endif /*_CAM_IFE_CSID_175_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c
new file mode 100644
index 0000000..6ad8b8b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid17x.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+
+#include <linux/module.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid170.h"
+#include "cam_ife_csid175.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_DRV_NAME                    "csid_17x"
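+/*
+ * The hw_dts_version values below appear to pack the CSID core revision
+ * into the upper nibbles (0x1007.... -> v1.7), matching the major/minor
+ * fields in the common register config (an inference from the values,
+ * not from the HW spec).
+ */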
+#define CAM_CSID_VERSION_V170                 0x10070000
+#define CAM_CSID_VERSION_V175                 0x10070050
+
+static struct cam_ife_csid_hw_info cam_ife_csid170_hw_info = {
+	.csid_reg = &cam_ife_csid_170_reg_offset,
+	.hw_dts_version = CAM_CSID_VERSION_V170,
+};
+
+static struct cam_ife_csid_hw_info cam_ife_csid175_hw_info = {
+	.csid_reg = &cam_ife_csid_175_reg_offset,
+	.hw_dts_version = CAM_CSID_VERSION_V175,
+};
+
+static const struct of_device_id cam_ife_csid17x_dt_match[] = {
+	{
+		.compatible = "qcom,csid170",
+		.data = &cam_ife_csid170_hw_info,
+	},
+	{
+		.compatible = "qcom,csid175",
+		.data = &cam_ife_csid175_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_ife_csid17x_dt_match);
+
+static struct platform_driver cam_ife_csid17x_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid17x_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_ife_csid17x_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid17x_driver);
+}
+
+static void __exit cam_ife_csid17x_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid17x_driver);
+}
+
+module_init(cam_ife_csid17x_init_module);
+module_exit(cam_ife_csid17x_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID17X driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
new file mode 100644
index 0000000..c0d5b62
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.c
@@ -0,0 +1,3433 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/iopoll.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include <uapi/media/cam_defs.h>
+
+#include "cam_ife_csid_core.h"
+#include "cam_isp_hw.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+
+/* Timeout value in msec */
+#define IFE_CSID_TIMEOUT                               1000
+
+/* TPG VC/DT values */
+#define CAM_IFE_CSID_TPG_VC_VAL                        0xA
+#define CAM_IFE_CSID_TPG_DT_VAL                        0x2B
+
+/* Timeout values in usec */
+#define CAM_IFE_CSID_TIMEOUT_SLEEP_US                  1000
+#define CAM_IFE_CSID_TIMEOUT_ALL_US                    100000
+
+/*
+ * Constant Factors needed to change QTimer ticks to nanoseconds
+ * QTimer Freq = 19.2 MHz
+ * Time(us) = ticks/19.2
+ * Time(ns) = ticks/19.2 * 1000
+ */
+#define CAM_IFE_CSID_QTIMER_MUL_FACTOR                 10000
+#define CAM_IFE_CSID_QTIMER_DIV_FACTOR                 192
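+/*
+ * Worked example: 19200 ticks -> 19200 * 10000 / 192 = 1000000 ns (1 ms),
+ * consistent with ticks / 19.2 * 1000.
+ */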
+
+/* Max number of SOF IRQs triggered in case of an SOF freeze */
+#define CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX 12
+
+/* Max CSI Rx irq error count threshold value */
+#define CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT               100
+
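+/*
+ * Returns 0 if @in_format can be decoded on the IPP/PPP paths and
+ * -EINVAL otherwise; callers treat any non-zero return as unsupported.
+ */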
+static int cam_ife_csid_is_ipp_ppp_format_supported(
+	uint32_t in_format)
+{
+	int rc = -EINVAL;
+
+	switch (in_format) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_DPCM_10_6_10:
+	case CAM_FORMAT_DPCM_10_8_10:
+	case CAM_FORMAT_DPCM_12_6_12:
+	case CAM_FORMAT_DPCM_12_8_12:
+	case CAM_FORMAT_DPCM_14_8_14:
+	case CAM_FORMAT_DPCM_14_10_14:
+		rc = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+}
+
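+/*
+ * Map an input/output format pair to the RDI decode and plain format
+ * register values. A decode format of 0xf appears to select packed
+ * pass-through, while the other values unpack to PLAIN8/16/32 (a reading
+ * inferred from the pairings below, not from the HW spec).
+ */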
+static int cam_ife_csid_get_format_rdi(
+	uint32_t in_format, uint32_t out_format,
+	uint32_t *decode_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+
+	switch (in_format) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_6:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			*decode_fmt = 0x0;
+			*plain_fmt = 0x0;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_PLAIN128:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			*decode_fmt = 0x1;
+			*plain_fmt = 0x0;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_PLAIN128:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+			*decode_fmt = 0x2;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_12:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_12:
+			*decode_fmt = 0x3;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_14:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_14:
+			*decode_fmt = 0x4;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_16:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN16_16:
+			*decode_fmt = 0x5;
+			*plain_fmt = 0x1;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_MIPI_RAW_20:
+		switch (out_format) {
+		case CAM_FORMAT_MIPI_RAW_20:
+			*decode_fmt = 0xf;
+			break;
+		case CAM_FORMAT_PLAIN32_20:
+			*decode_fmt = 0x6;
+			*plain_fmt = 0x2;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_FORMAT_DPCM_10_6_10:
+		*decode_fmt  = 0x7;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_10_8_10:
+		*decode_fmt  = 0x8;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_12_6_12:
+		*decode_fmt  = 0x9;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_12_8_12:
+		*decode_fmt  = 0xA;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_14_8_14:
+		*decode_fmt  = 0xB;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_14_10_14:
+		*decode_fmt  = 0xC;
+		*plain_fmt = 0x1;
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "Unsupported format pair in %d out %d",
+			in_format, out_format);
+
+	return rc;
+}
+
+static int cam_ife_csid_get_format_ipp_ppp(
+	uint32_t in_format,
+	uint32_t *decode_fmt, uint32_t *plain_fmt)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_ISP, "input format:%d",
+		 in_format);
+
+	switch (in_format) {
+	case CAM_FORMAT_MIPI_RAW_6:
+		*decode_fmt  = 0;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_8:
+		*decode_fmt  = 0x1;
+		*plain_fmt = 0;
+		break;
+	case CAM_FORMAT_MIPI_RAW_10:
+		*decode_fmt  = 0x2;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_12:
+		*decode_fmt  = 0x3;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_14:
+		*decode_fmt  = 0x4;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_16:
+		*decode_fmt  = 0x5;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_MIPI_RAW_20:
+		*decode_fmt  = 0x6;
+		*plain_fmt = 0x2;
+		break;
+	case CAM_FORMAT_DPCM_10_6_10:
+		*decode_fmt  = 0x7;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_10_8_10:
+		*decode_fmt  = 0x8;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_12_6_12:
+		*decode_fmt  = 0x9;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_12_8_12:
+		*decode_fmt  = 0xA;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_14_8_14:
+		*decode_fmt  = 0xB;
+		*plain_fmt = 0x1;
+		break;
+	case CAM_FORMAT_DPCM_14_10_14:
+		*decode_fmt  = 0xC;
+		*plain_fmt = 0x1;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported format %d",
+			in_format);
+		rc = -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "decode_fmt:%d plain_fmt:%d",
+		 *decode_fmt, *plain_fmt);
+
+	return rc;
+}
+
+static int cam_ife_csid_cid_get(struct cam_ife_csid_hw *csid_hw,
+	struct cam_isp_resource_node **res, int32_t vc, uint32_t dt)
+{
+	struct cam_ife_csid_cid_data    *cid_data;
+	uint32_t  i = 0;
+
+	*res = NULL;
+
+	/* Return already reserved CID if the VC/DT matches */
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		if (csid_hw->cid_res[i].res_state >=
+			CAM_ISP_RESOURCE_STATE_RESERVED) {
+			cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[i].res_priv;
+			if (cid_data->vc == vc && cid_data->dt == dt) {
+				cid_data->cnt++;
+				*res = &csid_hw->cid_res[i];
+				return 0;
+			}
+		}
+	}
+
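+	/* No reserved CID matched; allocate the first available one */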
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++) {
+		if (csid_hw->cid_res[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			cid_data = (struct cam_ife_csid_cid_data *)
+				csid_hw->cid_res[i].res_priv;
+			cid_data->vc  = vc;
+			cid_data->dt  = dt;
+			cid_data->cnt = 1;
+			csid_hw->cid_res[i].res_state =
+				CAM_ISP_RESOURCE_STATE_RESERVED;
+			*res = &csid_hw->cid_res[i];
+			CAM_DBG(CAM_ISP, "CSID:%d CID %d allocated",
+				csid_hw->hw_intf->hw_idx,
+				csid_hw->cid_res[i].res_id);
+			return 0;
+		}
+	}
+
+	CAM_ERR(CAM_ISP, "CSID:%d Free cid is not available",
+		 csid_hw->hw_intf->hw_idx);
+
+	return -EINVAL;
+}
+
+
+static int cam_ife_csid_global_reset(struct cam_ife_csid_hw *csid_hw)
+{
+	struct cam_hw_soc_info                *soc_info;
+	const struct cam_ife_csid_reg_offset  *csid_reg;
+	int rc = 0;
+	uint32_t val = 0, i;
+	uint32_t status;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid HW State:%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d Csid reset",
+		csid_hw->hw_intf->hw_idx);
+
+	/* Mask all interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->num_pix)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+	if (csid_reg->cmn_reg->num_ppp)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+		cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->num_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->num_ppp)
+		cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
+	for (i = 0 ; i < csid_reg->cmn_reg->num_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	cam_io_w_mb(0x80, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	/* enable the IPP, PPP and RDI format measure */
+	if (csid_reg->cmn_reg->num_pix)
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_cfg0_addr);
+
+	if (csid_reg->cmn_reg->num_ppp)
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_cfg0_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_cfg0_addr);
+
+	/* perform the top CSID HW and SW registers reset */
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb_sw_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+
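+	/* Poll for the reset-done bit (bit 0) in the top IRQ status */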
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr,
+			status, (status & 0x1) == 0x1,
+		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+			  csid_hw->hw_intf->hw_idx, rc);
+		rc = -ETIMEDOUT;
+	}
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+	if (val != 0)
+		CAM_ERR(CAM_ISP, "CSID:%d IRQ value after reset rc = %d",
+			csid_hw->hw_intf->hw_idx, val);
+	csid_hw->error_irq_count = 0;
+
+	return rc;
+}
+
+static int cam_ife_csid_path_reset(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_reset_cfg_args  *reset)
+{
+	int rc = 0;
+	struct cam_hw_soc_info                    *soc_info;
+	struct cam_isp_resource_node              *res;
+	const struct cam_ife_csid_reg_offset      *csid_reg;
+	uint32_t  reset_strb_addr, reset_strb_val, val, id;
+	struct completion  *complete;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	res      = reset->node_res;
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid hw state :%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d resource:%d",
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		if (!csid_reg->ipp_reg) {
+			CAM_ERR(CAM_ISP, "CSID:%d IPP not supported :%d",
+				 csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr = csid_reg->ipp_reg->csid_pxl_rst_strobes_addr;
+		complete = &csid_hw->csid_ipp_complete;
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			 csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+	} else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) {
+		if (!csid_reg->ppp_reg) {
+			CAM_ERR(CAM_ISP, "CSID:%d PPP not supported :%d",
+				csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr = csid_reg->ppp_reg->csid_pxl_rst_strobes_addr;
+		complete = &csid_hw->csid_ppp_complete;
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			 csid_reg->ppp_reg->csid_pxl_irq_mask_addr);
+	} else {
+		id = res->res_id;
+		if (!csid_reg->rdi_reg[id]) {
+			CAM_ERR(CAM_ISP, "CSID:%d RDI res not supported :%d",
+				 csid_hw->hw_intf->hw_idx,
+				res->res_id);
+			return -EINVAL;
+		}
+
+		reset_strb_addr =
+			csid_reg->rdi_reg[id]->csid_rdi_rst_strobes_addr;
+		complete =
+			&csid_hw->csid_rdin_complete[id];
+
+		/* Enable path reset done interrupt */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+		val |= CSID_PATH_INFO_RST_DONE;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+	}
+
+	init_completion(complete);
+	reset_strb_val = csid_reg->cmn_reg->path_rst_stb_all;
+
+	/* Enable the Test gen before reset */
+	cam_io_w_mb(1, csid_hw->hw_info->soc_info.reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+	/* Reset the corresponding ife csid path */
+	cam_io_w_mb(reset_strb_val, soc_info->reg_map[0].mem_base +
+				reset_strb_addr);
+
+	rc = wait_for_completion_timeout(complete,
+		msecs_to_jiffies(IFE_CSID_TIMEOUT));
+	if (rc <= 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d Res id %d fail rc = %d",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_id,  rc);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	}
+
+	/* Disable the test gen after reset */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+
+end:
+	return rc;
+
+}
+
+static int cam_ife_csid_cid_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *cid_reserv)
+{
+	int rc = 0;
+	struct cam_ife_csid_cid_data       *cid_data;
+	uint32_t camera_hw_version;
+
+	CAM_DBG(CAM_ISP,
+		"CSID:%d res_sel:0x%x Lane type:%d lane_num:%d dt:%d vc:%d",
+		csid_hw->hw_intf->hw_idx,
+		cid_reserv->in_port->res_type,
+		cid_reserv->in_port->lane_type,
+		cid_reserv->in_port->lane_num,
+		cid_reserv->in_port->dt,
+		cid_reserv->in_port->vc);
+
+	if (cid_reserv->in_port->res_type >= CAM_ISP_IFE_IN_RES_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d  Invalid phy sel %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->res_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->lane_type >= CAM_ISP_LANE_TYPE_MAX &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		CAM_ERR(CAM_ISP, "CSID:%d  Invalid lane type %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((cid_reserv->in_port->lane_type ==  CAM_ISP_LANE_TYPE_DPHY &&
+		cid_reserv->in_port->lane_num > 4) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid lane num %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+	if ((cid_reserv->in_port->lane_type == CAM_ISP_LANE_TYPE_CPHY &&
+		cid_reserv->in_port->lane_num > 3) &&
+		cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+		CAM_ERR(CAM_ISP, " CSID:%d Invalid lane type %d & num %d",
+			 csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->lane_type,
+			cid_reserv->in_port->lane_num);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* CSID CSI2 v2.0 supports VC values up to 31 and DT up to 0x3f */
+	if (cid_reserv->in_port->dt > 0x3f ||
+		cid_reserv->in_port->vc > 0x1f) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->vc, cid_reserv->in_port->dt);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG && (
+		(cid_reserv->in_port->format < CAM_FORMAT_MIPI_RAW_8 ||
+		cid_reserv->in_port->format > CAM_FORMAT_MIPI_RAW_16))) {
+		CAM_ERR(CAM_ISP, " CSID:%d Invalid tpg decode fmt %d",
+			 csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->format);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (csid_hw->csi2_reserve_cnt == UINT_MAX) {
+		CAM_ERR(CAM_ISP,
+			"CSID%d reserve cnt reached max",
+			csid_hw->hw_intf->hw_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to get HW version rc:%d", rc);
+		goto end;
+	}
+	CAM_DBG(CAM_ISP, "HW version: %d", camera_hw_version);
+
+	switch (camera_hw_version) {
+	case CAM_CPAS_TITAN_NONE:
+	case CAM_CPAS_TITAN_MAX:
+		CAM_ERR(CAM_ISP, "Invalid HW version: %d", camera_hw_version);
+		break;
+	case CAM_CPAS_TITAN_170_V100:
+	case CAM_CPAS_TITAN_170_V110:
+	case CAM_CPAS_TITAN_170_V120:
+		if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_PHY_3 &&
+			csid_hw->hw_intf->hw_idx != 2) {
+			rc = -EINVAL;
+			goto end;
+		}
+		break;
+	default:
+		break;
+	}
+	CAM_DBG(CAM_ISP, "Reserve_cnt %u", csid_hw->csi2_reserve_cnt);
+
+	if (csid_hw->csi2_reserve_cnt) {
+		/* currently configured res type must match the request */
+		if (csid_hw->res_type != cid_reserv->in_port->res_type) {
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cid_reserv->in_port->res_type != CAM_ISP_IFE_IN_RES_TPG) {
+			if (csid_hw->csi2_rx_cfg.lane_cfg !=
+				cid_reserv->in_port->lane_cfg  ||
+				csid_hw->csi2_rx_cfg.lane_type !=
+				cid_reserv->in_port->lane_type ||
+				csid_hw->csi2_rx_cfg.lane_num !=
+				cid_reserv->in_port->lane_num) {
+				rc = -EINVAL;
+				goto end;
+			}
+		} else {
+			if (csid_hw->tpg_cfg.in_format !=
+				cid_reserv->in_port->format     ||
+				csid_hw->tpg_cfg.width !=
+				cid_reserv->in_port->left_width ||
+				csid_hw->tpg_cfg.height !=
+				cid_reserv->in_port->height     ||
+				csid_hw->tpg_cfg.test_pattern !=
+				cid_reserv->in_port->test_pattern) {
+				rc = -EINVAL;
+				goto end;
+			}
+		}
+	}
+
+	switch (cid_reserv->res_id) {
+	case CAM_IFE_PIX_PATH_RES_IPP:
+		if (csid_hw->ipp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d IPP resource not available",
+				csid_hw->hw_intf->hw_idx);
+			rc = -EINVAL;
+			goto end;
+		}
+		break;
+	case CAM_IFE_PIX_PATH_RES_PPP:
+		if (csid_hw->ppp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d PPP resource not available state %d",
+				csid_hw->hw_intf->hw_idx,
+				csid_hw->ppp_res.res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+		break;
+	case CAM_IFE_PIX_PATH_RES_RDI_0:
+	case CAM_IFE_PIX_PATH_RES_RDI_1:
+	case CAM_IFE_PIX_PATH_RES_RDI_2:
+	case CAM_IFE_PIX_PATH_RES_RDI_3:
+		if (csid_hw->rdi_res[cid_reserv->res_id].res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d RDI:%d resource not available",
+				csid_hw->hw_intf->hw_idx,
+				cid_reserv->res_id);
+			rc = -EINVAL;
+			goto end;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID%d: Invalid csid path",
+			csid_hw->hw_intf->hw_idx);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_ife_csid_cid_get(csid_hw,
+		&cid_reserv->node_res,
+		cid_reserv->in_port->vc,
+		cid_reserv->in_port->dt);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID:%d CID Reserve failed res_type %d",
+			csid_hw->hw_intf->hw_idx,
+			cid_reserv->in_port->res_type);
+		goto end;
+	}
+	cid_data = (struct cam_ife_csid_cid_data *)
+		cid_reserv->node_res->res_priv;
+
+	if (!csid_hw->csi2_reserve_cnt) {
+		csid_hw->res_type = cid_reserv->in_port->res_type;
+
+		csid_hw->csi2_rx_cfg.lane_cfg =
+			cid_reserv->in_port->lane_cfg;
+		csid_hw->csi2_rx_cfg.lane_type =
+			cid_reserv->in_port->lane_type;
+		csid_hw->csi2_rx_cfg.lane_num =
+			cid_reserv->in_port->lane_num;
+
+		if (cid_reserv->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+			csid_hw->csi2_rx_cfg.phy_sel = 0;
+			if (cid_reserv->in_port->format >
+			    CAM_FORMAT_MIPI_RAW_16) {
+				CAM_ERR(CAM_ISP, " Wrong TPG format");
+				rc = -EINVAL;
+				goto end;
+			}
+			csid_hw->tpg_cfg.in_format =
+				cid_reserv->in_port->format;
+			csid_hw->tpg_cfg.usage_type =
+				cid_reserv->in_port->usage_type;
+			if (cid_reserv->in_port->usage_type)
+				csid_hw->tpg_cfg.width =
+					(cid_reserv->in_port->right_stop + 1);
+			else
+				csid_hw->tpg_cfg.width =
+					cid_reserv->in_port->left_width;
+
+			csid_hw->tpg_cfg.height = cid_reserv->in_port->height;
+			csid_hw->tpg_cfg.test_pattern =
+				cid_reserv->in_port->test_pattern;
+
+			CAM_DBG(CAM_ISP, "CSID:%d TPG width:%d height:%d",
+				csid_hw->hw_intf->hw_idx,
+				csid_hw->tpg_cfg.width,
+				csid_hw->tpg_cfg.height);
+
+			cid_data->tpg_set = 1;
+		} else {
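+			/*
+			 * The res_type enum appears to encode the CSI PHY
+			 * number in its low byte; subtracting one yields the
+			 * zero-based PHY select (inferred from this math).
+			 */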
+			csid_hw->csi2_rx_cfg.phy_sel =
+				(cid_reserv->in_port->res_type & 0xFF) - 1;
+		}
+	}
+
+	csid_hw->csi2_reserve_cnt++;
+	CAM_DBG(CAM_ISP, "CSID:%d CID:%d acquired",
+		csid_hw->hw_intf->hw_idx,
+		cid_reserv->node_res->res_id);
+
+end:
+	return rc;
+}
+
+
+static int cam_ife_csid_path_reserve(struct cam_ife_csid_hw *csid_hw,
+	struct cam_csid_hw_reserve_resource_args  *reserve)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg    *path_data;
+	struct cam_isp_resource_node    *res;
+
+	/* CSID CSI2 v2.0 supports VC values up to 31 and DT up to 0x3f */
+	if (reserve->in_port->dt > 0x3f || reserve->in_port->vc > 0x1f ||
+		(reserve->sync_mode >= CAM_ISP_HW_SYNC_MAX)) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid vc:%d dt %d mode:%d",
+			 csid_hw->hw_intf->hw_idx,
+			reserve->in_port->vc, reserve->in_port->dt,
+			reserve->sync_mode);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	switch (reserve->res_id) {
+	case CAM_IFE_PIX_PATH_RES_IPP:
+		if (csid_hw->ipp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d IPP resource not available %d",
+				csid_hw->hw_intf->hw_idx,
+				csid_hw->ipp_res.res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cam_ife_csid_is_ipp_ppp_format_supported(
+				reserve->in_port->format)) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d res id:%d un support format %d",
+				csid_hw->hw_intf->hw_idx, reserve->res_id,
+				reserve->in_port->format);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* assign the IPP resource */
+		res = &csid_hw->ipp_res;
+		CAM_DBG(CAM_ISP,
+			"CSID:%d IPP resource:%d acquired successfully",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+
+		break;
+	case CAM_IFE_PIX_PATH_RES_PPP:
+		if (csid_hw->ppp_res.res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d PPP resource not available %d",
+				csid_hw->hw_intf->hw_idx,
+				csid_hw->ppp_res.res_state);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (cam_ife_csid_is_ipp_ppp_format_supported(
+				reserve->in_port->format)) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d res id:%d unsupported format %d",
+				csid_hw->hw_intf->hw_idx, reserve->res_id,
+				reserve->in_port->format);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		/* assign the PPP resource */
+		res = &csid_hw->ppp_res;
+		CAM_DBG(CAM_ISP,
+			"CSID:%d PPP resource:%d acquired successfully",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+
+		break;
+	case CAM_IFE_PIX_PATH_RES_RDI_0:
+	case CAM_IFE_PIX_PATH_RES_RDI_1:
+	case CAM_IFE_PIX_PATH_RES_RDI_2:
+	case CAM_IFE_PIX_PATH_RES_RDI_3:
+		if (csid_hw->rdi_res[reserve->res_id].res_state !=
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+			CAM_ERR(CAM_ISP,
+				"CSID:%d RDI:%d resource not available %d",
+				csid_hw->hw_intf->hw_idx,
+				reserve->res_id,
+				csid_hw->rdi_res[reserve->res_id].res_state);
+			rc = -EINVAL;
+			goto end;
+		} else {
+			res = &csid_hw->rdi_res[reserve->res_id];
+			CAM_DBG(CAM_ISP,
+				"CSID:%d RDI resource:%d acquire success",
+				csid_hw->hw_intf->hw_idx,
+				res->res_id);
+		}
+
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id:%d",
+			csid_hw->hw_intf->hw_idx, reserve->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	path_data = (struct cam_ife_csid_path_cfg   *)res->res_priv;
+
+	path_data->cid = reserve->cid;
+	path_data->in_format = reserve->in_port->format;
+	path_data->out_format = reserve->out_port->format;
+	path_data->sync_mode = reserve->sync_mode;
+	path_data->height  = reserve->in_port->height;
+	path_data->start_line = reserve->in_port->line_start;
+	path_data->end_line = reserve->in_port->line_stop;
+
+	/* Enable RDI crop only for the single IFE use case */
+	switch (reserve->res_id) {
+	case CAM_IFE_PIX_PATH_RES_RDI_0:
+	case CAM_IFE_PIX_PATH_RES_RDI_1:
+	case CAM_IFE_PIX_PATH_RES_RDI_2:
+	case CAM_IFE_PIX_PATH_RES_RDI_3:
+		if (reserve->in_port->usage_type)
+			path_data->crop_enable = false;
+		else
+			path_data->crop_enable = true;
+
+		break;
+	case CAM_IFE_PIX_PATH_RES_IPP:
+		path_data->crop_enable = true;
+		break;
+	case CAM_IFE_PIX_PATH_RES_PPP:
+		path_data->crop_enable = false;
+		break;
+	default:
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP,
+		"Res id: %d height:%d line_start %d line_stop %d crop_en %d",
+		reserve->res_id, reserve->in_port->height,
+		reserve->in_port->line_start, reserve->in_port->line_stop,
+		path_data->crop_enable);
+
+	if (reserve->in_port->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		path_data->dt = CAM_IFE_CSID_TPG_DT_VAL;
+		path_data->vc = CAM_IFE_CSID_TPG_VC_VAL;
+	} else {
+		path_data->dt = reserve->in_port->dt;
+		path_data->vc = reserve->in_port->vc;
+	}
+
+	if (reserve->sync_mode == CAM_ISP_HW_SYNC_MASTER) {
+		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->end_pixel = reserve->in_port->left_stop;
+		path_data->width  = reserve->in_port->left_width;
+		CAM_DBG(CAM_ISP, "CSID:%d master:startpixel 0x%x endpixel:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+			path_data->end_pixel);
+		CAM_DBG(CAM_ISP, "CSID:%d master:line start:0x%x line end:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_line,
+			path_data->end_line);
+	} else if (reserve->sync_mode == CAM_ISP_HW_SYNC_SLAVE) {
+		path_data->master_idx = reserve->master_idx;
+		CAM_DBG(CAM_ISP, "CSID:%d master_idx=%d",
+			csid_hw->hw_intf->hw_idx, path_data->master_idx);
+		path_data->start_pixel = reserve->in_port->right_start;
+		path_data->end_pixel = reserve->in_port->right_stop;
+		path_data->width  = reserve->in_port->right_width;
+		CAM_DBG(CAM_ISP, "CSID:%d slave:start:0x%x end:0x%x width 0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_pixel,
+			path_data->end_pixel, path_data->width);
+		CAM_DBG(CAM_ISP, "CSID:%d slave:line start:0x%x line end:0x%x",
+			csid_hw->hw_intf->hw_idx, path_data->start_line,
+			path_data->end_line);
+	} else {
+		path_data->width  = reserve->in_port->left_width;
+		path_data->start_pixel = reserve->in_port->left_start;
+		path_data->end_pixel = reserve->in_port->left_stop;
+		CAM_DBG(CAM_ISP, "Res id: %d left width %d start: %d stop:%d",
+			reserve->res_id, reserve->in_port->left_width,
+			reserve->in_port->left_start,
+			reserve->in_port->left_stop);
+	}
+
+	CAM_DBG(CAM_ISP, "Res %d width %d height %d", reserve->res_id,
+		path_data->width, path_data->height);
+	reserve->node_res = res;
+
+end:
+	return rc;
+}
+
+static int cam_ife_csid_enable_hw(struct cam_ife_csid_hw  *csid_hw)
+{
+	int rc = 0;
+	const struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info              *soc_info;
+	uint32_t i, val, clk_lvl;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	/* overflow check before increment */
+	if (csid_hw->hw_info->open_count == UINT_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+			csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	/* Increment ref Count */
+	csid_hw->hw_info->open_count++;
+	if (csid_hw->hw_info->open_count > 1) {
+		CAM_DBG(CAM_ISP, "CSID hw has already been enabled");
+		return rc;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d init CSID HW",
+		csid_hw->hw_intf->hw_idx);
+
+	clk_lvl = cam_ife_csid_get_vote_level(soc_info, csid_hw->clk_rate);
+	CAM_DBG(CAM_ISP, "CSID clock lvl %u", clk_lvl);
+
+	rc = cam_ife_csid_enable_soc_resources(soc_info, clk_lvl);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CSID:%d Enable SOC failed",
+			csid_hw->hw_intf->hw_idx);
+		goto err;
+	}
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_UP;
+	/* Reset CSID top */
+	rc = cam_ife_csid_global_reset(csid_hw);
+	if (rc)
+		goto disable_soc;
+
+	/* clear all interrupts */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+
+	cam_io_w_mb(csid_reg->csi2_reg->csi2_irq_mask_all,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->num_pix)
+		cam_io_w_mb(csid_reg->cmn_reg->ipp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->num_ppp)
+		cam_io_w_mb(csid_reg->cmn_reg->ppp_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+		cam_io_w_mb(csid_reg->cmn_reg->rdi_irq_mask_all,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->cmn_reg->csid_hw_version_addr);
+	CAM_DBG(CAM_ISP, "CSID:%d CSID HW version: 0x%x",
+		csid_hw->hw_intf->hw_idx, val);
+
+	return 0;
+
+disable_soc:
+	cam_ife_csid_disable_soc_resources(soc_info);
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+err:
+	csid_hw->hw_info->open_count--;
+	return rc;
+}
+
+static int cam_ife_csid_disable_hw(struct cam_ife_csid_hw *csid_hw)
+{
+	int rc = -EINVAL;
+	struct cam_hw_soc_info                   *soc_info;
+	const struct cam_ife_csid_reg_offset     *csid_reg;
+
+	/* Check for refcount */
+	if (!csid_hw->hw_info->open_count) {
+		CAM_WARN(CAM_ISP, "Unbalanced disable_hw");
+		return rc;
+	}
+
+	/* Decrement ref count */
+	csid_hw->hw_info->open_count--;
+
+	if (csid_hw->hw_info->open_count) {
+		rc = 0;
+		return rc;
+	}
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	CAM_DBG(CAM_ISP, "%s:Calling Global Reset\n", __func__);
+	cam_ife_csid_global_reset(csid_hw);
+	CAM_DBG(CAM_ISP, "%s:Global Reset Done\n", __func__);
+
+	CAM_DBG(CAM_ISP, "CSID:%d De-init CSID HW",
+		csid_hw->hw_intf->hw_idx);
+
+	/* Disable the top IRQ */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_mask_addr);
+
+	rc = cam_ife_csid_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CSID:%d Disable CSID SOC failed",
+			csid_hw->hw_intf->hw_idx);
+
+	csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	csid_hw->error_irq_count = 0;
+
+	return rc;
+}
+
+
+static int cam_ife_csid_tpg_start(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	int rc = 0;
+	uint32_t  val = 0;
+	struct cam_hw_soc_info    *soc_info;
+	const struct cam_ife_csid_reg_offset *csid_reg = NULL;
+
+	csid_hw->tpg_start_cnt++;
+	if (csid_hw->tpg_start_cnt == 1) {
+		/* Enable the TPG */
+		CAM_DBG(CAM_ISP, "CSID:%d start CSID TPG",
+			csid_hw->hw_intf->hw_idx);
+
+		soc_info = &csid_hw->hw_info->soc_info;
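+		/* Debug-only dump of the TPG, IPP and CSI2 Rx register banks */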
+		{
+			uint32_t val;
+			uint32_t i;
+			uint32_t base = 0x600;
+
+			CAM_DBG(CAM_ISP, "================ TPG ============");
+			for (i = 0; i < 16; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
+					(base + i*4), val);
+			}
+
+			CAM_DBG(CAM_ISP, "================ IPP =============");
+			base = 0x200;
+			for (i = 0; i < 10; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
+					(base + i*4), val);
+			}
+
+			CAM_DBG(CAM_ISP, "================ RX =============");
+			base = 0x100;
+			for (i = 0; i < 5; i++) {
+				val = cam_io_r_mb(
+					soc_info->reg_map[0].mem_base +
+					base + i * 4);
+				CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x",
+					(base + i*4), val);
+			}
+		}
+
+		/* Enable the IFE force clock on for dual isp case */
+		csid_reg = csid_hw->csid_info->csid_reg;
+		if (csid_hw->tpg_cfg.usage_type) {
+			rc = cam_ife_csid_enable_ife_force_clock_on(soc_info,
+				csid_reg->tpg_reg->tpg_cpas_ife_reg_offset);
+			if (rc)
+				return rc;
+		}
+
+		CAM_DBG(CAM_ISP, "============ TPG control ============");
+		val = (4 << 20);
+		val |= (0x80 << 8);
+		val |= (((csid_hw->csi2_rx_cfg.lane_num - 1) & 0x3) << 4);
+		val |= 7;
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base + 0x600);
+		CAM_DBG(CAM_ISP, "reg 0x%x = 0x%x", 0x600, val);
+	}
+
+	return 0;
+}
+
+static int cam_ife_csid_tpg_stop(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	int rc = 0;
+	struct cam_hw_soc_info                 *soc_info;
+	const struct cam_ife_csid_reg_offset   *csid_reg = NULL;
+
+	if (csid_hw->tpg_start_cnt)
+		csid_hw->tpg_start_cnt--;
+
+	if (csid_hw->tpg_start_cnt)
+		return 0;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	/* disable the TPG */
+	if (!csid_hw->tpg_start_cnt) {
+		CAM_DBG(CAM_ISP, "CSID:%d stop CSID TPG",
+			csid_hw->hw_intf->hw_idx);
+
+		/* Disable the IFE force clock on for dual isp case */
+		if (csid_hw->tpg_cfg.usage_type)
+			rc = cam_ife_csid_disable_ife_force_clock_on(soc_info,
+				csid_reg->tpg_reg->tpg_cpas_ife_reg_offset);
+
+		/* Stop the TPG */
+		cam_io_w_mb(0,  soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_ctrl_addr);
+	}
+
+	return 0;
+}
+
+
+static int cam_ife_csid_config_tpg(struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node       *res)
+{
+	const struct cam_ife_csid_reg_offset *csid_reg;
+	struct cam_hw_soc_info               *soc_info;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	CAM_DBG(CAM_ISP, "CSID:%d TPG config",
+		csid_hw->hw_intf->hw_idx);
+
+	/* configure one DT, infinite frames */
+	val = (0 << 16) | (1 << 10) | CAM_IFE_CSID_TPG_VC_VAL;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg0_addr);
+
+	/* vertical blanking count = 0x3FF, horizontal blanking count = 0x740 */
+	val = (0x3FF << 12) | 0x740;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->tpg_reg->csid_tpg_vc_cfg1_addr);
+
+	cam_io_w_mb(0x12345678, soc_info->reg_map[0].mem_base +
+		csid_hw->csid_info->csid_reg->tpg_reg->csid_tpg_lfsr_seed_addr);
+
+	val = csid_hw->tpg_cfg.width << 16 |
+		csid_hw->tpg_cfg.height;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_0_addr);
+
+	cam_io_w_mb(CAM_IFE_CSID_TPG_DT_VAL, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_1_addr);
+
+	/*
+	 * in_format matches the input resource format, which is one
+	 * larger than the value the register expects; hence the -1 below.
+	 */
+	val = ((csid_hw->tpg_cfg.in_format - 1) << 16) | 0x8;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_dt_n_cfg_2_addr);
+
+	/* static frame with split color bar */
+	val =  1 << 5;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_color_bars_cfg_addr);
+	/* config pix pattern */
+	cam_io_w_mb(csid_hw->tpg_cfg.test_pattern,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->tpg_reg->csid_tpg_common_gen_cfg_addr);
+
+	return 0;
+}
+
+static int cam_ife_csid_enable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	const struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+	struct cam_ife_csid_cid_data               *cid_data;
+	uint32_t val = 0;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CAM_DBG(CAM_ISP, "CSID:%d count:%d config csi2 rx",
+		csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	/* overflow check before increment */
+	if (csid_hw->csi2_cfg_cnt == UINT_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d Open count reached max",
+			csid_hw->hw_intf->hw_idx);
+		return -EINVAL;
+	}
+
+	cid_data = (struct cam_ife_csid_cid_data *)res->res_priv;
+
+	res->res_state  = CAM_ISP_RESOURCE_STATE_STREAMING;
+	csid_hw->csi2_cfg_cnt++;
+	if (csid_hw->csi2_cfg_cnt > 1)
+		return rc;
+
+	/* rx cfg0 */
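+	/*
+	 * Pack the lane count (N-1), lane mapping, PHY select and lane
+	 * type into Rx cfg0; field positions follow the shifts below.
+	 */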
+	val = (csid_hw->csi2_rx_cfg.lane_num - 1)  |
+		(csid_hw->csi2_rx_cfg.lane_cfg << 4) |
+		(csid_hw->csi2_rx_cfg.lane_type << 24);
+	val |= (csid_hw->csi2_rx_cfg.phy_sel & 0x3) << 20;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+
+	/* rx cfg1 */
+	val = (1 << csid_reg->csi2_reg->csi2_misr_enable_shift_val);
+	/* if the VC value is more than 3, set the full-width VC mode */
+	if (cid_data->vc > 3)
+		val |= (1 << csid_reg->csi2_reg->csi2_vc_mode_shift_val);
+
+	/* enable packet ecc correction */
+	val |= 1;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG) {
+		/* Config the TPG */
+		rc = cam_ife_csid_config_tpg(csid_hw, res);
+		if (rc) {
+			res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+			return rc;
+		}
+	}
+
+	/* Enable the CSI2 Rx interrupts */
+	val = CSID_CSI2_RX_INFO_RST_DONE |
+		CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW |
+		CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION |
+		CSID_CSI2_RX_ERROR_CRC |
+		CSID_CSI2_RX_ERROR_ECC |
+		CSID_CSI2_RX_ERROR_MMAPPED_VC_DT |
+		CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW |
+		CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME |
+		CSID_CSI2_RX_ERROR_CPHY_PH_CRC;
+
+	/* Enable the interrupt based on csid debug info set */
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ)
+		val |= CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ)
+		val |= CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED |
+			CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val |= CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	return 0;
+}
+
+static int cam_ife_csid_disable_csi2(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	const struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+
+	if (res->res_id >= CAM_IFE_CSID_CID_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id :%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CAM_DBG(CAM_ISP, "CSID:%d cnt : %d Disable csi2 rx",
+		csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	if (csid_hw->csi2_cfg_cnt)
+		csid_hw->csi2_cfg_cnt--;
+
+	if (csid_hw->csi2_cfg_cnt)
+		return 0;
+
+	/* Disable the CSI2 Rx interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	/* Reset the Rx CFG registers */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return 0;
+}
+
+static void cam_ife_csid_halt_csi2(
+	struct cam_ife_csid_hw          *csid_hw)
+{
+	const struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	CAM_INFO(CAM_ISP, "CSID: %d cnt: %d Halt csi2 rx",
+		csid_hw->hw_intf->hw_idx, csid_hw->csi2_cfg_cnt);
+
+	/* Disable the CSI2 Rx interrupts */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_mask_addr);
+
+	/* Reset the Rx CFG registers */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg0_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_cfg1_addr);
+}
+
+static int cam_ife_csid_init_config_pxl_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg             *path_data;
+	const struct cam_ife_csid_reg_offset     *csid_reg;
+	struct cam_hw_soc_info                   *soc_info;
+	const struct cam_ife_csid_pxl_reg_offset *pxl_reg = NULL;
+	bool                                      is_ipp;
+	uint32_t decode_format = 0, plain_format = 0, val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg  *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		is_ipp = true;
+		pxl_reg = csid_reg->ipp_reg;
+	} else {
+		is_ipp = false;
+		pxl_reg = csid_reg->ppp_reg;
+	}
+
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d %s:%d is not supported on HW",
+			csid_hw->hw_intf->hw_idx,
+			(is_ipp) ? "IPP" : "PPP", res->res_id);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Config %s Path", (is_ipp) ? "IPP" : "PPP");
+	rc = cam_ife_csid_get_format_ipp_ppp(path_data->in_format,
+		&decode_format, &plain_format);
+	if (rc)
+		return rc;
+
+	/*
+	 * Configure the pxl path and enable the time stamp capture.
+	 * Enable the HW measurement blocks.
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(decode_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(path_data->crop_enable <<
+		csid_reg->cmn_reg->crop_h_en_shift_val) |
+		(path_data->crop_enable <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 1) | 1;
+
+	val |= (1 << pxl_reg->pix_store_en_shift_val);
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg0_addr);
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg1_addr);
+
+	/* select the post irq sub sample strobe for time stamp capture */
+	val |= CSID_TIMESTAMP_STB_POST_IRQ;
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg1_addr);
+
+	if (path_data->crop_enable) {
+		val = (((path_data->end_pixel & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF));
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_hcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
+
+		val = (((path_data->end_line & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF));
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_vcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
+
+		/* Enable generating early eof strobe based on crop config */
+		if (!(csid_hw->csid_debug & CSID_DEBUG_DISABLE_EARLY_EOF)) {
+			val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_cfg0_addr);
+			val |= (1 << pxl_reg->early_eof_en_shift_val);
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				pxl_reg->csid_pxl_cfg0_addr);
+		}
+	}
+
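+	/*
+	 * Program the drop and subsample blocks to their defaults below
+	 * (pattern 0, period 1), which the driver uses as the no-drop,
+	 * every-frame configuration.
+	 */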
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_frm_drop_pattern_addr);
+	/* set irq sub sample pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_irq_subsample_pattern_addr);
+	/* set pxl drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_line_drop_period_addr);
+
+
+	/* Enable the Pxl path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+		val |= csid_reg->cmn_reg->format_measure_en_val;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_cfg0_addr);
+
+	/* Enable the HBI/VBI counter */
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_format_measure_cfg0_addr);
+		val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val,
+			soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_format_measure_cfg0_addr);
+	}
+
+	/* configure the rx packet capture based on csid debug set */
+	val = 0;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val = ((1 <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+	CAM_DBG(CAM_ISP, "rx capture control value 0x%x", val);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_ife_csid_deinit_pxl_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	uint32_t val;
+	const struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+	const struct cam_ife_csid_pxl_reg_offset  *pxl_reg = NULL;
+	bool                                       is_ipp;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		is_ipp = true;
+		pxl_reg = csid_reg->ipp_reg;
+	} else {
+		is_ipp = false;
+		pxl_reg = csid_reg->ppp_reg;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d %s Res type %d res_id:%d in wrong state %d",
+			csid_hw->hw_intf->hw_idx,
+			(is_ipp) ? "IPP" : "PPP",
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+	}
+
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d %s %d is not supported on HW",
+			csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_cfg0_addr);
+	if (val & csid_reg->cmn_reg->format_measure_en_val) {
+		val &= ~csid_reg->cmn_reg->format_measure_en_val;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_cfg0_addr);
+
+		/* Disable the HBI/VBI counter */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_format_measure_cfg0_addr);
+		val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			pxl_reg->csid_pxl_format_measure_cfg0_addr);
+	}
+
+end:
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_ife_csid_enable_pxl_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	const struct cam_ife_csid_reg_offset     *csid_reg;
+	struct cam_hw_soc_info                   *soc_info;
+	struct cam_ife_csid_path_cfg             *path_data;
+	const struct cam_ife_csid_pxl_reg_offset *pxl_reg = NULL;
+	bool                                      is_ipp;
+	uint32_t                                  val = 0;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		is_ipp = true;
+		pxl_reg = csid_reg->ipp_reg;
+	} else {
+		is_ipp = false;
+		pxl_reg = csid_reg->ppp_reg;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d %s path res type:%d res_id:%d Invalid state%d",
+			csid_hw->hw_intf->hw_idx,
+			(is_ipp) ? "IPP" : "PPP",
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d %s %d not supported on HW",
+			csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+			res->res_id);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Enable %s path", (is_ipp) ? "IPP" : "PPP");
+
+	/* Set master or slave path */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER)
+		/* Set halt mode as master */
+		val = CSID_HALT_MODE_MASTER << 2;
+	else if (path_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		/* Set halt mode as slave and set master idx */
+		val = path_data->master_idx << 4 | CSID_HALT_MODE_SLAVE << 2;
+	else
+		/* Default is internal halt mode */
+		val = 0;
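+	/*
+	 * Per the shifts above, the halt mode sits in bits [3:2] of the
+	 * pxl ctrl register and the master select field starts at bit 4.
+	 */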
+
+	/*
+	 * Resume at frame boundary if Master or No Sync.
+	 * Slave will get resume command from Master.
+	 */
+	if (path_data->sync_mode == CAM_ISP_HW_SYNC_MASTER ||
+		path_data->sync_mode == CAM_ISP_HW_SYNC_NONE)
+		val |= CAM_CSID_RESUME_AT_FRAME_BOUNDARY;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_ctrl_addr);
+
+	CAM_DBG(CAM_ISP, "CSID:%d %s Ctrl val: 0x%x",
+			csid_hw->hw_intf->hw_idx,
+			(is_ipp) ? "IPP" : "PPP", val);
+
+	/* Enable the required pxl path interrupts */
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_SOF;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_EOF;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_irq_mask_addr);
+
+	CAM_DBG(CAM_ISP, "Enable %s IRQ mask 0x%x",
+		(is_ipp) ? "IPP" : "PPP", val);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+static int cam_ife_csid_disable_pxl_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd       stop_cmd)
+{
+	int rc = 0;
+	const struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+	struct cam_ife_csid_path_cfg               *path_data;
+	const struct cam_ife_csid_pxl_reg_offset   *pxl_reg;
+	bool                                        is_ipp;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in stopped state:%d",
+			csid_hw->hw_intf->hw_idx, res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		is_ipp = true;
+		pxl_reg = csid_reg->ipp_reg;
+	} else {
+		is_ipp = false;
+		pxl_reg = csid_reg->ppp_reg;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CAM_DBG(CAM_ISP, "CSID:%d %s path Res:%d Invalid state%d",
+			csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+			res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	if (!pxl_reg) {
+		CAM_ERR(CAM_ISP, "CSID:%d &s %d is not supported on HW",
+			csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		CAM_ERR(CAM_ISP, "CSID:%d %s path un supported stop command:%d",
+			csid_hw->hw_intf->hw_idx, (is_ipp) ? "IPP" : "PPP",
+			stop_cmd);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d %s path",
+		csid_hw->hw_intf->hw_idx, res->res_id,
+		(is_ipp) ? "IPP" : "PPP");
+
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		pxl_reg->csid_pxl_irq_mask_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_init_config_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	struct cam_ife_csid_path_cfg           *path_data;
+	const struct cam_ife_csid_reg_offset   *csid_reg;
+	struct cam_hw_soc_info                 *soc_info;
+	uint32_t path_format = 0, plain_fmt = 0, val = 0, id;
+	uint32_t format_measure_addr;
+
+	path_data = (struct cam_ife_csid_path_cfg   *) res->res_priv;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	id = res->res_id;
+	if (!csid_reg->rdi_reg[id]) {
+		CAM_ERR(CAM_ISP, "CSID:%d RDI:%d is not supported on HW",
+			 csid_hw->hw_intf->hw_idx, id);
+		return -EINVAL;
+	}
+
+	rc = cam_ife_csid_get_format_rdi(path_data->in_format,
+		path_data->out_format, &path_format, &plain_fmt);
+	if (rc)
+		return rc;
+
+	/* If the path decode format is payload-only, RDI crop is not applied */
+	if (path_format == 0xF)
+		path_data->crop_enable = 0;
+
+	/*
+	 * RDI path config and enable the time stamp capture
+	 * Enable the measurement blocks
+	 */
+	val = (path_data->vc << csid_reg->cmn_reg->vc_shift_val) |
+		(path_data->dt << csid_reg->cmn_reg->dt_shift_val) |
+		(path_data->cid << csid_reg->cmn_reg->dt_id_shift_val) |
+		(path_format << csid_reg->cmn_reg->fmt_shift_val) |
+		(plain_fmt << csid_reg->cmn_reg->plain_fmt_shit_val) |
+		(path_data->crop_enable  <<
+			csid_reg->cmn_reg->crop_h_en_shift_val) |
+		(path_data->crop_enable  <<
+		csid_reg->cmn_reg->crop_v_en_shift_val) |
+		(1 << 2) | 3;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	/* select the post irq sub sample strobe for time stamp capture */
+	cam_io_w_mb(CSID_TIMESTAMP_STB_POST_IRQ, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg1_addr);
+
+	if (path_data->crop_enable) {
+		val = (((path_data->end_pixel & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_pixel & 0xFFFF));
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_hcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Horizontal crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
+
+		val = (((path_data->end_line & 0xFFFF) <<
+			csid_reg->cmn_reg->crop_shift) |
+			(path_data->start_line & 0xFFFF));
+
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_rpp_vcrop_addr);
+		CAM_DBG(CAM_ISP, "CSID:%d Vertical Crop config val: 0x%x",
+			csid_hw->hw_intf->hw_idx, val);
+	}
+	/* set frame drop pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_frm_drop_pattern_addr);
+	/* set irq subsample pattern to 0 and period to 1 */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_period_addr);
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_subsample_pattern_addr);
+
+	/* set pixel drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_pix_drop_period_addr);
+	/* set line drop pattern to 0 and period to 1 */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_pattern_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_rpp_line_drop_period_addr);
+
+	/* Configure the halt mode */
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the RPP path */
+	val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+	val |= (1 << csid_reg->cmn_reg->path_en_shift_val);
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO)
+		val |= csid_reg->cmn_reg->format_measure_en_val;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+	format_measure_addr =
+		csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+	/* Enable the HBI/VBI counter */
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			format_measure_addr);
+		val |= csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val,
+			soc_info->reg_map[0].mem_base + format_measure_addr);
+	}
+
+	/*
+	 * configure the rx packet capture based on csid debug set; clear
+	 * val first so stale cfg0 bits are not written into the capture
+	 * ctrl register (mirrors the pxl path above)
+	 */
+	val = 0;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE)
+		val = ((1 <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_en_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_short_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_long_pkt_vc_shift));
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE)
+		val |= ((1 <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_en_shift) |
+			(path_data->dt <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_dt_shift) |
+			(path_data->vc <<
+			csid_reg->csi2_reg->csi2_capture_cphy_pkt_vc_shift));
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_capture_ctrl_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+
+	return rc;
+}
+
+static int cam_ife_csid_deinit_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	int rc = 0;
+	uint32_t id, val, format_measure_addr;
+	const struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		!csid_reg->rdi_reg[id]) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res id%d state:%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	format_measure_addr =
+		csid_reg->rdi_reg[id]->csid_rdi_format_measure_cfg0_addr;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+		val &= ~csid_reg->cmn_reg->format_measure_en_val;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_cfg0_addr);
+
+		/* Disable the HBI/VBI counter */
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			format_measure_addr);
+		val &= ~csid_reg->cmn_reg->measure_en_hbi_vbi_cnt_mask;
+		cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+			format_measure_addr);
+	}
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_ife_csid_enable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res)
+{
+	const struct cam_ife_csid_reg_offset      *csid_reg;
+	struct cam_hw_soc_info                    *soc_info;
+	uint32_t id, val;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3 ||
+		!csid_reg->rdi_reg[id]) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d invalid res type:%d res_id:%d state%d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		return -EINVAL;
+	}
+
+	/* Resume at frame boundary */
+	cam_io_w_mb(CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+			soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[id]->csid_rdi_ctrl_addr);
+
+	/* Enable the required RDI interrupts */
+	val = CSID_PATH_INFO_RST_DONE | CSID_PATH_ERROR_FIFO_OVERFLOW;
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_SOF;
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ)
+		val |= CSID_PATH_INFO_INPUT_EOF;
+
+	cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+	res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+
+static int cam_ife_csid_disable_rdi_path(
+	struct cam_ife_csid_hw          *csid_hw,
+	struct cam_isp_resource_node    *res,
+	enum cam_ife_csid_halt_cmd                stop_cmd)
+{
+	int rc = 0;
+	uint32_t id;
+	const struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	id = res->res_id;
+
+	if ((res->res_id > CAM_IFE_PIX_PATH_RES_RDI_3) ||
+		(!csid_reg->rdi_reg[res->res_id])) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d Invalid res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_id);
+		return -EINVAL;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_INIT_HW ||
+		res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d already in stopped state:%d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_id, res->res_state);
+		return rc;
+	}
+
+	if (res->res_state != CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"CSID:%d Res:%d Invalid res_state%d",
+			csid_hw->hw_intf->hw_idx, res->res_id,
+			res->res_state);
+		return -EINVAL;
+	}
+
+	if (stop_cmd != CAM_CSID_HALT_AT_FRAME_BOUNDARY &&
+		stop_cmd != CAM_CSID_HALT_IMMEDIATELY) {
+		CAM_ERR(CAM_ISP, "CSID:%d un supported stop command:%d",
+			csid_hw->hw_intf->hw_idx, stop_cmd);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_id);
+
+	cam_io_w_mb(0, soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[id]->csid_rdi_irq_mask_addr);
+
+	return rc;
+}
+
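+/*
+ * Read back and log the HBI/VBI values measured by the format measure
+ * block for the given path; only meaningful while the HBI/VBI counters
+ * are enabled through the csid debug flags.
+ */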
+static int cam_ife_csid_get_hbi_vbi(
+	struct cam_ife_csid_hw   *csid_hw,
+	struct cam_isp_resource_node *res)
+{
+	uint32_t  hbi, vbi;
+	const struct cam_ife_csid_reg_offset     *csid_reg;
+	const struct cam_ife_csid_rdi_reg_offset *rdi_reg;
+	struct cam_hw_soc_info                   *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_format_measure1_addr);
+		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_format_measure2_addr);
+	} else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) {
+		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_format_measure1_addr);
+		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_format_measure2_addr);
+	} else {
+		rdi_reg = csid_reg->rdi_reg[res->res_id];
+		hbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_format_measure1_addr);
+		vbi = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_format_measure2_addr);
+	}
+
+	CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u HBI: 0x%x", res->res_id,
+		hbi);
+	CAM_INFO_RATE_LIMIT(CAM_ISP, "Resource %u VBI: 0x%x", res->res_id,
+		vbi);
+
+	return 0;
+}
+
+
+static int cam_ife_csid_get_time_stamp(
+		struct cam_ife_csid_hw   *csid_hw, void *cmd_args)
+{
+	struct cam_csid_get_time_stamp_args        *time_stamp;
+	struct cam_isp_resource_node               *res;
+	const struct cam_ife_csid_reg_offset       *csid_reg;
+	struct cam_hw_soc_info                     *soc_info;
+	const struct cam_ife_csid_rdi_reg_offset   *rdi_reg;
+	struct timespec64 ts;
+	uint32_t  time_32, id;
+
+	time_stamp = (struct cam_csid_get_time_stamp_args  *)cmd_args;
+	res = time_stamp->node_res;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (res->res_type != CAM_ISP_RESOURCE_PIX_PATH ||
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res_type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	if (csid_hw->hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid dev state :%d",
+			csid_hw->hw_intf->hw_idx,
+			csid_hw->hw_info->hw_state);
+		return -EINVAL;
+	}
+
+	if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP) {
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = (uint64_t) time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_timestamp_curr0_sof_addr);
+	} else if (res->res_id == CAM_IFE_PIX_PATH_RES_PPP) {
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = (uint64_t) time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_timestamp_curr0_sof_addr);
+	} else {
+		id = res->res_id;
+		rdi_reg = csid_reg->rdi_reg[id];
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_timestamp_curr1_sof_addr);
+		time_stamp->time_stamp_val = (uint64_t) time_32;
+		time_stamp->time_stamp_val = time_stamp->time_stamp_val << 32;
+
+		time_32 = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			rdi_reg->csid_rdi_timestamp_curr0_sof_addr);
+	}
+
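+	/*
+	 * The SOF timestamp is latched as two 32-bit halves (curr1 holds
+	 * the MSBs, curr0 the LSBs); combine them and scale the QTimer
+	 * ticks with the mul/div factors.
+	 */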
+	time_stamp->time_stamp_val |= (uint64_t) time_32;
+	time_stamp->time_stamp_val = mul_u64_u32_div(
+		time_stamp->time_stamp_val,
+		CAM_IFE_CSID_QTIMER_MUL_FACTOR,
+		CAM_IFE_CSID_QTIMER_DIV_FACTOR);
+
+	get_monotonic_boottime64(&ts);
+	time_stamp->boot_timestamp = (uint64_t)((ts.tv_sec * 1000000000) +
+		ts.tv_nsec);
+
+	return 0;
+}
+
+static int cam_ife_csid_set_csid_debug(struct cam_ife_csid_hw   *csid_hw,
+	void *cmd_args)
+{
+	uint32_t  *csid_debug;
+
+	csid_debug = (uint32_t  *) cmd_args;
+	csid_hw->csid_debug = *csid_debug;
+	CAM_DBG(CAM_ISP, "CSID:%d set csid debug value:%d",
+		csid_hw->hw_intf->hw_idx, csid_hw->csid_debug);
+
+	return 0;
+}
+
+static int cam_ife_csid_get_hw_caps(void *hw_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw_caps           *hw_caps;
+	struct cam_ife_csid_hw                *csid_hw;
+	struct cam_hw_info                    *csid_hw_info;
+	const struct cam_ife_csid_reg_offset  *csid_reg;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	csid_reg = csid_hw->csid_info->csid_reg;
+	hw_caps = (struct cam_ife_csid_hw_caps *) get_hw_cap_args;
+
+	hw_caps->num_rdis = csid_reg->cmn_reg->num_rdis;
+	hw_caps->num_pix = csid_reg->cmn_reg->num_pix;
+	hw_caps->num_ppp = csid_reg->cmn_reg->num_ppp;
+	hw_caps->major_version = csid_reg->cmn_reg->major_version;
+	hw_caps->minor_version = csid_reg->cmn_reg->minor_version;
+	hw_caps->version_incr = csid_reg->cmn_reg->version_incr;
+
+	CAM_DBG(CAM_ISP,
+		"CSID:%d No rdis:%d, no pix:%d, major:%d minor:%d ver :%d",
+		csid_hw->hw_intf->hw_idx, hw_caps->num_rdis,
+		hw_caps->num_pix, hw_caps->major_version,
+		hw_caps->minor_version, hw_caps->version_incr);
+
+	return rc;
+}
+
+static int cam_ife_csid_reset(void *hw_priv,
+	void *reset_args, uint32_t arg_size)
+{
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_csid_reset_cfg_args  *reset;
+	int rc = 0;
+
+	if (!hw_priv || !reset_args || (arg_size !=
+		sizeof(struct cam_csid_reset_cfg_args))) {
+		CAM_ERR(CAM_ISP, "CSID:Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reset   = (struct cam_csid_reset_cfg_args  *)reset_args;
+
+	switch (reset->reset_type) {
+	case CAM_IFE_CSID_RESET_GLOBAL:
+		rc = cam_ife_csid_global_reset(csid_hw);
+		break;
+	case CAM_IFE_CSID_RESET_PATH:
+		rc = cam_ife_csid_path_reset(csid_hw, reset);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:Invalid reset type :%d",
+			reset->reset_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_ife_csid_reserve(void *hw_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                    *csid_hw;
+	struct cam_hw_info                        *csid_hw_info;
+	struct cam_csid_hw_reserve_resource_args  *reserv;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_csid_hw_reserve_resource_args))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	reserv = (struct cam_csid_hw_reserve_resource_args  *)reserve_args;
+
+	CAM_DBG(CAM_ISP, "res_type %d, CSID: %u",
+		reserv->res_type, csid_hw->hw_intf->hw_idx);
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	switch (reserv->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_cid_reserve(csid_hw, reserv);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		rc = cam_ife_csid_path_reserve(csid_hw, reserv);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type :%d",
+			csid_hw->hw_intf->hw_idx, reserv->res_type);
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_release(void *hw_priv,
+	void *release_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw          *csid_hw;
+	struct cam_hw_info              *csid_hw_info;
+	struct cam_isp_resource_node    *res;
+	struct cam_ife_csid_cid_data    *cid_data;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)release_args;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_DBG(CAM_ISP,
+			"CSID:%d res type:%d Res %d  in released state",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id);
+		goto end;
+	}
+
+	if (res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP,
+			"CSID:%d res type:%d Res id:%d invalid state:%d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res type :%d Resource id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		cid_data = (struct cam_ife_csid_cid_data    *) res->res_priv;
+		if (cid_data->cnt)
+			cid_data->cnt--;
+
+		if (!cid_data->cnt)
+			res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+		if (csid_hw->csi2_reserve_cnt)
+			csid_hw->csi2_reserve_cnt--;
+
+		if (!csid_hw->csi2_reserve_cnt)
+			memset(&csid_hw->csi2_rx_cfg, 0,
+				sizeof(struct cam_ife_csid_csi2_rx_cfg));
+
+		CAM_DBG(CAM_ISP, "CSID:%d res id :%d cnt:%d reserv cnt:%d",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_id, cid_data->cnt, csid_hw->csi2_reserve_cnt);
+
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		break;
+	}
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
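+/*
+ * Issue a CSID reset strobe that retains the SW-programmed registers and
+ * poll the top irq status until the reset-done bit is set.
+ */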
+static int cam_ife_csid_reset_retain_sw_reg(
+	struct cam_ife_csid_hw *csid_hw)
+{
+	int rc = 0;
+	uint32_t status;
+	const struct cam_ife_csid_reg_offset *csid_reg =
+		csid_hw->csid_info->csid_reg;
+	struct cam_hw_soc_info          *soc_info;
+
+	soc_info = &csid_hw->hw_info->soc_info;
+	/* clear the top interrupt first */
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	cam_io_w_mb(csid_reg->cmn_reg->csid_rst_stb,
+		soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_rst_strobes_addr);
+	rc = readl_poll_timeout(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr,
+			status, (status & 0x1) == 0x1,
+		CAM_IFE_CSID_TIMEOUT_SLEEP_US, CAM_IFE_CSID_TIMEOUT_ALL_US);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d csid_reset fail rc = %d",
+			  csid_hw->hw_intf->hw_idx, rc);
+		rc = -ETIMEDOUT;
+	} else {
+		CAM_DBG(CAM_ISP, "CSID:%d hw reset completed %d",
+			csid_hw->hw_intf->hw_idx, rc);
+		rc = 0;
+	}
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_clear_addr);
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	return rc;
+}
+
+static int cam_ife_csid_init_hw(void *hw_priv,
+	void *init_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	const struct cam_ife_csid_reg_offset   *csid_reg;
+
+	if (!hw_priv || !init_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res      = (struct cam_isp_resource_node *)init_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res tpe:%d res id%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if ((res->res_type == CAM_ISP_RESOURCE_PIX_PATH) &&
+		(res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED)) {
+		CAM_ERR(CAM_ISP,
+			"CSID:%d res type:%d res_id:%dInvalid state %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id, res->res_state);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	CAM_DBG(CAM_ISP, "CSID:%d res type :%d res_id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	/* Initialize the csid hardware */
+	rc = cam_ife_csid_enable_hw(csid_hw);
+	if (rc)
+		goto end;
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		rc = cam_ife_csid_enable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+			res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+			rc = cam_ife_csid_init_config_pxl_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_init_config_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type state %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+
+	rc = cam_ife_csid_reset_retain_sw_reg(csid_hw);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "CSID: Failed in SW reset");
+
+	if (rc)
+		cam_ife_csid_disable_hw(csid_hw);
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_deinit_hw(void *hw_priv,
+	void *deinit_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+
+	if (!hw_priv || !deinit_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID:Invalid arguments");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "Enter");
+	res = (struct cam_isp_resource_node *)deinit_args;
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	mutex_lock(&csid_hw->hw_info->hw_mutex);
+	if (res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "CSID:%d Res:%d already in De-init state",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_id);
+		goto end;
+	}
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		CAM_DBG(CAM_ISP, "De-Init ife_csid");
+		rc = cam_ife_csid_disable_csi2(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		CAM_DBG(CAM_ISP, "De-Init Pix Path: %d\n", res->res_id);
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+			res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+			rc = cam_ife_csid_deinit_pxl_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_deinit_rdi_path(csid_hw, res);
+
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid Res type %d",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		goto end;
+	}
+
+	/* Disable CSID HW */
+	CAM_DBG(CAM_ISP, "Disabling CSID Hw\n");
+	cam_ife_csid_disable_hw(csid_hw);
+	CAM_DBG(CAM_ISP, "%s: Exit\n", __func__);
+
+end:
+	mutex_unlock(&csid_hw->hw_info->hw_mutex);
+	return rc;
+}
+
+static int cam_ife_csid_start(void *hw_priv, void *start_args,
+			uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw                 *csid_hw;
+	struct cam_hw_info                     *csid_hw_info;
+	struct cam_isp_resource_node           *res;
+	const struct cam_ife_csid_reg_offset   *csid_reg;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	res = (struct cam_isp_resource_node *)start_args;
+	csid_reg = csid_hw->csid_info->csid_reg;
+
+	if ((res->res_type == CAM_ISP_RESOURCE_CID &&
+		res->res_id >= CAM_IFE_CSID_CID_MAX) ||
+		(res->res_type == CAM_ISP_RESOURCE_PIX_PATH &&
+		res->res_id >= CAM_IFE_PIX_PATH_RES_MAX)) {
+		CAM_DBG(CAM_ISP, "CSID:%d Invalid res tpe:%d res id:%d",
+			csid_hw->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Reset sof irq debug fields */
+	csid_hw->sof_irq_triggered = false;
+	csid_hw->irq_debug_cnt = 0;
+
+	CAM_DBG(CAM_ISP, "CSID:%d res_type :%d res_id:%d",
+		csid_hw->hw_intf->hw_idx, res->res_type, res->res_id);
+
+	switch (res->res_type) {
+	case CAM_ISP_RESOURCE_CID:
+		if (csid_hw->res_type ==  CAM_ISP_IFE_IN_RES_TPG)
+			rc = cam_ife_csid_tpg_start(csid_hw, res);
+		break;
+	case CAM_ISP_RESOURCE_PIX_PATH:
+		if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+			res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+			rc = cam_ife_csid_enable_pxl_path(csid_hw, res);
+		else
+			rc = cam_ife_csid_enable_rdi_path(csid_hw, res);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+			 csid_hw->hw_intf->hw_idx,
+			res->res_type);
+		break;
+	}
+end:
+	return rc;
+}
+
+static int cam_ife_csid_stop(void *hw_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_isp_resource_node         *res;
+	struct cam_csid_hw_stop_args         *csid_stop;
+	uint32_t  i;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_csid_hw_stop_args))) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+	csid_stop = (struct cam_csid_hw_stop_args  *) stop_args;
+
+	if (!csid_stop->num_res) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid args");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+	CAM_DBG(CAM_ISP, "CSID:%d num_res %d",
+		csid_hw->hw_intf->hw_idx,
+		csid_stop->num_res);
+
+	/* Stop the resource first */
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		CAM_DBG(CAM_ISP, "CSID:%d res_type %d res_id %d",
+			csid_hw->hw_intf->hw_idx,
+			res->res_type, res->res_id);
+		switch (res->res_type) {
+		case CAM_ISP_RESOURCE_CID:
+			if (csid_hw->res_type == CAM_ISP_IFE_IN_RES_TPG)
+				rc = cam_ife_csid_tpg_stop(csid_hw, res);
+			break;
+		case CAM_ISP_RESOURCE_PIX_PATH:
+			if (res->res_id == CAM_IFE_PIX_PATH_RES_IPP ||
+				res->res_id == CAM_IFE_PIX_PATH_RES_PPP)
+				rc = cam_ife_csid_disable_pxl_path(csid_hw,
+					res, csid_stop->stop_cmd);
+			else
+				rc = cam_ife_csid_disable_rdi_path(csid_hw,
+					res, csid_stop->stop_cmd);
+
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "CSID:%d Invalid res type%d",
+				csid_hw->hw_intf->hw_idx,
+				res->res_type);
+			break;
+		}
+	}
+
+	for (i = 0; i < csid_stop->num_res; i++) {
+		res = csid_stop->node_res[i];
+		res->res_state = CAM_ISP_RESOURCE_STATE_INIT_HW;
+	}
+
+	CAM_DBG(CAM_ISP,  "%s: Exit\n", __func__);
+
+	return rc;
+
+}
+
+static int cam_ife_csid_read(void *hw_priv,
+	void *read_args, uint32_t arg_size)
+{
+	CAM_ERR(CAM_ISP, "CSID: un supported");
+
+	return -EINVAL;
+}
+
+static int cam_ife_csid_write(void *hw_priv,
+	void *write_args, uint32_t arg_size)
+{
+	CAM_ERR(CAM_ISP, "CSID: un supported");
+	return -EINVAL;
+}
+
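+/*
+ * Runtime toggle of the SOF info irq on the IPP and RDI paths; used to
+ * debug SOF freeze without reprogramming the full irq masks.
+ */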
+static int cam_ife_csid_sof_irq_debug(
+	struct cam_ife_csid_hw *csid_hw, void *cmd_args)
+{
+	int i = 0;
+	uint32_t val = 0;
+	bool sof_irq_enable = false;
+	const struct cam_ife_csid_reg_offset    *csid_reg;
+	struct cam_hw_soc_info                  *soc_info;
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+
+	if (*((uint32_t *)cmd_args) == 1)
+		sof_irq_enable = true;
+
+	if (csid_hw->hw_info->hw_state ==
+		CAM_HW_STATE_POWER_DOWN) {
+		CAM_WARN(CAM_ISP,
+			"CSID powered down unable to %s sof irq",
+			(sof_irq_enable == true) ? "enable" : "disable");
+		return 0;
+	}
+
+	if (csid_reg->ipp_reg) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+
+		if (val) {
+			if (sof_irq_enable)
+				val |= CSID_PATH_INFO_INPUT_SOF;
+			else
+				val &= ~CSID_PATH_INFO_INPUT_SOF;
+
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_pxl_irq_mask_addr);
+			val = 0;
+		}
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+		if (val) {
+			if (sof_irq_enable)
+				val |= CSID_PATH_INFO_INPUT_SOF;
+			else
+				val &= ~CSID_PATH_INFO_INPUT_SOF;
+
+			cam_io_w_mb(val, soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_irq_mask_addr);
+			val = 0;
+		}
+	}
+
+	if (sof_irq_enable) {
+		csid_hw->csid_debug |= CSID_DEBUG_ENABLE_SOF_IRQ;
+		csid_hw->sof_irq_triggered = true;
+	} else {
+		csid_hw->csid_debug &= ~CSID_DEBUG_ENABLE_SOF_IRQ;
+		csid_hw->sof_irq_triggered = false;
+	}
+
+	CAM_INFO(CAM_ISP, "SOF freeze: CSID SOF irq %s",
+		(sof_irq_enable == true) ? "enabled" : "disabled");
+
+	return 0;
+}
+
+static int cam_ife_csid_set_csid_clock(
+	struct cam_ife_csid_hw *csid_hw, void *cmd_args)
+{
+	struct cam_ife_csid_clock_update_args *clk_update = NULL;
+
+	if (!csid_hw)
+		return -EINVAL;
+
+	clk_update =
+		(struct cam_ife_csid_clock_update_args *)cmd_args;
+
+	csid_hw->clk_rate = clk_update->clk_rate;
+	CAM_INFO(CAM_ISP, "CSID clock rate %llu", csid_hw->clk_rate);
+
+	return 0;
+}
+
+static int cam_ife_csid_process_cmd(void *hw_priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_ife_csid_hw               *csid_hw;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_isp_resource_node         *res = NULL;
+
+	if (!hw_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
+		return -EINVAL;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *)hw_priv;
+	csid_hw = (struct cam_ife_csid_hw   *)csid_hw_info->core_info;
+
+	switch (cmd_type) {
+	case CAM_IFE_CSID_CMD_GET_TIME_STAMP:
+		rc = cam_ife_csid_get_time_stamp(csid_hw, cmd_args);
+		if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_HBI_VBI_INFO) {
+			res = ((struct cam_csid_get_time_stamp_args *)
+				cmd_args)->node_res;
+			cam_ife_csid_get_hbi_vbi(csid_hw, res);
+		}
+		break;
+	case CAM_IFE_CSID_SET_CSID_DEBUG:
+		rc = cam_ife_csid_set_csid_debug(csid_hw, cmd_args);
+		break;
+	case CAM_IFE_CSID_SOF_IRQ_DEBUG:
+		rc = cam_ife_csid_sof_irq_debug(csid_hw, cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE:
+		rc = cam_ife_csid_set_csid_clock(csid_hw, cmd_args);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "CSID:%d unsupported cmd:%d",
+			csid_hw->hw_intf->hw_idx, cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+
+}
+
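+/*
+ * Top-half CSID irq handler: snapshot the top/rx/path status registers,
+ * clear them with a single irq cmd, then decode the error and debug bits.
+ */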
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data)
+{
+	struct cam_ife_csid_hw                         *csid_hw;
+	struct cam_hw_soc_info                         *soc_info;
+	const struct cam_ife_csid_reg_offset           *csid_reg;
+	const struct cam_ife_csid_csi2_rx_reg_offset   *csi2_reg;
+	uint32_t i, irq_status_top, irq_status_rx, irq_status_ipp = 0;
+	uint32_t irq_status_rdi[4] = {0, 0, 0, 0};
+	uint32_t val, irq_status_ppp = 0;
+	bool fatal_err_detected = false;
+	uint32_t sof_irq_debug_en = 0;
+
+	if (!data) {
+		CAM_ERR(CAM_ISP, "CSID: Invalid arguments");
+		return IRQ_HANDLED;
+	}
+
+	csid_hw = (struct cam_ife_csid_hw *)data;
+
+	CAM_DBG(CAM_ISP, "CSID %d IRQ Handling", csid_hw->hw_intf->hw_idx);
+
+	csid_reg = csid_hw->csid_info->csid_reg;
+	soc_info = &csid_hw->hw_info->soc_info;
+	csi2_reg = csid_reg->csi2_reg;
+
+	/* read the top, rx and all path irq status registers */
+	irq_status_top = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_top_irq_status_addr);
+
+	irq_status_rx = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_status_addr);
+
+	if (csid_reg->cmn_reg->num_pix)
+		irq_status_ipp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_status_addr);
+
+	if (csid_reg->cmn_reg->num_ppp)
+		irq_status_ppp = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_irq_status_addr);
+
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++)
+		irq_status_rdi[i] = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		csid_reg->rdi_reg[i]->csid_rdi_irq_status_addr);
+
+	/* clear the latched status and apply via the irq cmd */
+	cam_io_w_mb(irq_status_rx, soc_info->reg_map[0].mem_base +
+		csid_reg->csi2_reg->csid_csi2_rx_irq_clear_addr);
+	if (csid_reg->cmn_reg->num_pix)
+		cam_io_w_mb(irq_status_ipp, soc_info->reg_map[0].mem_base +
+			csid_reg->ipp_reg->csid_pxl_irq_clear_addr);
+
+	if (csid_reg->cmn_reg->num_ppp)
+		cam_io_w_mb(irq_status_ppp, soc_info->reg_map[0].mem_base +
+			csid_reg->ppp_reg->csid_pxl_irq_clear_addr);
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		cam_io_w_mb(irq_status_rdi[i], soc_info->reg_map[0].mem_base +
+			csid_reg->rdi_reg[i]->csid_rdi_irq_clear_addr);
+	}
+	cam_io_w_mb(1, soc_info->reg_map[0].mem_base +
+		csid_reg->cmn_reg->csid_irq_cmd_addr);
+
+	CAM_DBG(CAM_ISP, "irq_status_top = 0x%x", irq_status_top);
+	CAM_DBG(CAM_ISP, "irq_status_rx = 0x%x", irq_status_rx);
+	CAM_DBG(CAM_ISP, "irq_status_ipp = 0x%x", irq_status_ipp);
+	CAM_DBG(CAM_ISP, "irq_status_ppp = 0x%x", irq_status_ppp);
+	CAM_DBG(CAM_ISP, "irq_status_rdi0= 0x%x", irq_status_rdi[0]);
+	CAM_DBG(CAM_ISP, "irq_status_rdi1= 0x%x", irq_status_rdi[1]);
+	CAM_DBG(CAM_ISP, "irq_status_rdi2= 0x%x", irq_status_rdi[2]);
+
+	if (irq_status_rx & BIT(csid_reg->csi2_reg->csi2_rst_done_shift_val)) {
+		CAM_DBG(CAM_ISP, "csi rx reset complete");
+		complete(&csid_hw->csid_csi2_complete);
+	}
+
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 0 over flow",
+			 csid_hw->hw_intf->hw_idx);
+		fatal_err_detected = true;
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 1 over flow",
+			 csid_hw->hw_intf->hw_idx);
+		fatal_err_detected = true;
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 2 over flow",
+			 csid_hw->hw_intf->hw_idx);
+		fatal_err_detected = true;
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d lane 3 over flow",
+			 csid_hw->hw_intf->hw_idx);
+		fatal_err_detected = true;
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d TG OVER FLOW",
+			 csid_hw->hw_intf->hw_idx);
+		fatal_err_detected = true;
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_EOT_RECEPTION",
+			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_SOT_RECEPTION",
+			 csid_hw->hw_intf->hw_idx);
+		csid_hw->error_irq_count++;
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CPHY_PH_CRC) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PH_CRC",
+			 csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_CRC) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_CRC",
+			 csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_ECC) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_ECC",
+			 csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_MMAPPED_VC_DT) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d MMAPPED_VC_DT",
+			 csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d ERROR_STREAM_UNDERFLOW",
+			 csid_hw->hw_intf->hw_idx);
+	}
+	if (irq_status_rx & CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "CSID:%d UNBOUNDED_FRAME",
+			 csid_hw->hw_intf->hw_idx);
+	}
+
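+	/*
+	 * A sustained burst of CPHY EOT/SOT reception errors is treated
+	 * as fatal once the running count crosses the threshold.
+	 */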
+	if (csid_hw->error_irq_count >
+		CAM_IFE_CSID_MAX_IRQ_ERROR_COUNT) {
+		fatal_err_detected = true;
+		csid_hw->error_irq_count = 0;
+	}
+
+	if (fatal_err_detected)
+		cam_ife_csid_halt_csi2(csid_hw);
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOT_IRQ) {
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_EOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+	}
+
+	if (csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOT_IRQ) {
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL0_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL1_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL2_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+		if (irq_status_rx & CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PHY_DL3_SOT_CAPTURED",
+				csid_hw->hw_intf->hw_idx);
+		}
+	}
+
+	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE) &&
+		(irq_status_rx & CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED)) {
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d LONG_PKT_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_long_pkt_0_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d long packet VC :%d DT:%d WC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x3F), (val & 0xFFFF));
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_long_pkt_1_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d long packet ECC :%d",
+			csid_hw->hw_intf->hw_idx, val);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_long_pkt_ftr_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d long pkt cal CRC:%d expected CRC:%d",
+			csid_hw->hw_intf->hw_idx, (val >> 16), (val & 0xFFFF));
+	}
+	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE) &&
+		(irq_status_rx & CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED)) {
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d SHORT_PKT_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_short_pkt_0_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d short pkt VC :%d DT:%d LC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_short_pkt_1_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d short packet ECC :%d",
+			csid_hw->hw_intf->hw_idx, val);
+	}
+
+	if ((csid_hw->csid_debug & CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE) &&
+		(irq_status_rx & CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED)) {
+		CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d CPHY_PKT_HDR_CAPTURED",
+			csid_hw->hw_intf->hw_idx);
+		val = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+			csi2_reg->csid_csi2_rx_captured_cphy_pkt_hdr_addr);
+		CAM_INFO_RATE_LIMIT(CAM_ISP,
+			"CSID:%d cphy packet VC :%d DT:%d WC:%d",
+			csid_hw->hw_intf->hw_idx,
+			(val >> 22), ((val >> 16) & 0x1F), (val & 0xFFFF));
+	}
+
+	/* read the IPP errors */
+	if (csid_reg->cmn_reg->num_pix) {
+		/* IPP reset done bit */
+		if (irq_status_ipp &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CAM_DBG(CAM_ISP, "CSID IPP reset complete");
+			complete(&csid_hw->csid_ipp_complete);
+		}
+
+		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_SOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP SOF received",
+				csid_hw->hw_intf->hw_idx);
+			if (csid_hw->sof_irq_triggered)
+				csid_hw->irq_debug_cnt++;
+		}
+
+		if ((irq_status_ipp & CSID_PATH_INFO_INPUT_EOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d IPP EOF received",
+				csid_hw->hw_intf->hw_idx);
+
+		if (irq_status_ipp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d IPP fifo over flow",
+				csid_hw->hw_intf->hw_idx);
+			/* Stop IPP path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->ipp_reg->csid_pxl_ctrl_addr);
+		}
+	}
+
+	/* read the PPP errors */
+	if (csid_reg->cmn_reg->num_ppp) {
+		/* PPP reset done bit */
+		if (irq_status_ppp &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CAM_DBG(CAM_ISP, "CSID PPP reset complete");
+			complete(&csid_hw->csid_ppp_complete);
+		}
+
+		if ((irq_status_ppp & CSID_PATH_INFO_INPUT_SOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP SOF received",
+				csid_hw->hw_intf->hw_idx);
+			if (csid_hw->sof_irq_triggered)
+				csid_hw->irq_debug_cnt++;
+		}
+
+		if ((irq_status_ppp & CSID_PATH_INFO_INPUT_EOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+			CAM_INFO_RATE_LIMIT(CAM_ISP, "CSID:%d PPP EOF received",
+				csid_hw->hw_intf->hw_idx);
+
+		if (irq_status_ppp & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d PPP fifo over flow",
+				csid_hw->hw_intf->hw_idx);
+			/* Stop PPP path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->ppp_reg->csid_pxl_ctrl_addr);
+		}
+	}
+
+	for (i = 0; i < csid_reg->cmn_reg->num_rdis; i++) {
+		if (irq_status_rdi[i] &
+			BIT(csid_reg->cmn_reg->path_rst_done_shift_val)) {
+			CAM_DBG(CAM_ISP, "CSID RDI%d reset complete", i);
+			complete(&csid_hw->csid_rdin_complete[i]);
+		}
+
+		if ((irq_status_rdi[i] & CSID_PATH_INFO_INPUT_SOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_SOF_IRQ)) {
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d SOF received", i);
+			if (csid_hw->sof_irq_triggered)
+				csid_hw->irq_debug_cnt++;
+		}
+
+		if ((irq_status_rdi[i]  & CSID_PATH_INFO_INPUT_EOF) &&
+			(csid_hw->csid_debug & CSID_DEBUG_ENABLE_EOF_IRQ))
+			CAM_INFO_RATE_LIMIT(CAM_ISP,
+				"CSID RDI:%d EOF received", i);
+
+		if (irq_status_rdi[i] & CSID_PATH_ERROR_FIFO_OVERFLOW) {
+			CAM_ERR_RATE_LIMIT(CAM_ISP,
+				"CSID:%d RDI%d FIFO overflow",
+				csid_hw->hw_intf->hw_idx, i);
+			/* Stop RDI path immediately */
+			cam_io_w_mb(CAM_CSID_HALT_IMMEDIATELY,
+				soc_info->reg_map[0].mem_base +
+				csid_reg->rdi_reg[i]->csid_rdi_ctrl_addr);
+		}
+	}
+
+	if (csid_hw->irq_debug_cnt >= CAM_CSID_IRQ_SOF_DEBUG_CNT_MAX) {
+		cam_ife_csid_sof_irq_debug(csid_hw, &sof_irq_debug_en);
+		csid_hw->irq_debug_cnt = 0;
+	}
+
+	CAM_DBG(CAM_ISP, "IRQ Handling exit");
+	return IRQ_HANDLED;
+}
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+	uint32_t num_paths;
+	struct cam_ife_csid_path_cfg         *path_data;
+	struct cam_ife_csid_cid_data         *cid_data;
+	struct cam_hw_info                   *csid_hw_info;
+	struct cam_ife_csid_hw               *ife_csid_hw = NULL;
+
+	if (csid_idx >= CAM_IFE_CSID_HW_RES_MAX) {
+		CAM_ERR(CAM_ISP, "Invalid csid index:%d", csid_idx);
+		return rc;
+	}
+
+	csid_hw_info = (struct cam_hw_info  *) csid_hw_intf->hw_priv;
+	ife_csid_hw  = (struct cam_ife_csid_hw  *) csid_hw_info->core_info;
+
+	ife_csid_hw->hw_intf = csid_hw_intf;
+	ife_csid_hw->hw_info = csid_hw_info;
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		ife_csid_hw->hw_intf->hw_type, csid_idx);
+
+	ife_csid_hw->hw_info->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&ife_csid_hw->hw_info->hw_mutex);
+	spin_lock_init(&ife_csid_hw->hw_info->hw_lock);
+	init_completion(&ife_csid_hw->hw_info->hw_complete);
+
+	init_completion(&ife_csid_hw->csid_top_complete);
+	init_completion(&ife_csid_hw->csid_csi2_complete);
+	init_completion(&ife_csid_hw->csid_ipp_complete);
+	init_completion(&ife_csid_hw->csid_ppp_complete);
+	for (i = 0; i < CAM_IFE_CSID_RDI_MAX; i++)
+		init_completion(&ife_csid_hw->csid_rdin_complete[i]);
+
+	rc = cam_ife_csid_init_soc_resources(&ife_csid_hw->hw_info->soc_info,
+			cam_ife_csid_irq, ife_csid_hw);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "CSID:%d Failed to init_soc", csid_idx);
+		goto err;
+	}
+
+	ife_csid_hw->hw_intf->hw_ops.get_hw_caps = cam_ife_csid_get_hw_caps;
+	ife_csid_hw->hw_intf->hw_ops.init        = cam_ife_csid_init_hw;
+	ife_csid_hw->hw_intf->hw_ops.deinit      = cam_ife_csid_deinit_hw;
+	ife_csid_hw->hw_intf->hw_ops.reset       = cam_ife_csid_reset;
+	ife_csid_hw->hw_intf->hw_ops.reserve     = cam_ife_csid_reserve;
+	ife_csid_hw->hw_intf->hw_ops.release     = cam_ife_csid_release;
+	ife_csid_hw->hw_intf->hw_ops.start       = cam_ife_csid_start;
+	ife_csid_hw->hw_intf->hw_ops.stop        = cam_ife_csid_stop;
+	ife_csid_hw->hw_intf->hw_ops.read        = cam_ife_csid_read;
+	ife_csid_hw->hw_intf->hw_ops.write       = cam_ife_csid_write;
+	ife_csid_hw->hw_intf->hw_ops.process_cmd = cam_ife_csid_process_cmd;
+
+	num_paths = ife_csid_hw->csid_info->csid_reg->cmn_reg->num_pix +
+		ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+	/* Initialize the CID resources */
+	for (i = 0; i < num_paths; i++) {
+		ife_csid_hw->cid_res[i].res_type = CAM_ISP_RESOURCE_CID;
+		ife_csid_hw->cid_res[i].res_id = i;
+		ife_csid_hw->cid_res[i].res_state  =
+					CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->cid_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		cid_data = kzalloc(sizeof(*cid_data), GFP_KERNEL);
+		if (!cid_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->cid_res[i].res_priv = cid_data;
+	}
+
+	/* Initialize the IPP resource */
+	if (ife_csid_hw->csid_info->csid_reg->cmn_reg->num_pix) {
+		ife_csid_hw->ipp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->ipp_res.res_id = CAM_IFE_PIX_PATH_RES_IPP;
+		ife_csid_hw->ipp_res.res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->ipp_res.hw_intf = ife_csid_hw->hw_intf;
+		path_data = kzalloc(sizeof(*path_data),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->ipp_res.res_priv = path_data;
+	}
+
+	/* Initialize the PPP resource */
+	if (ife_csid_hw->csid_info->csid_reg->cmn_reg->num_ppp) {
+		ife_csid_hw->ppp_res.res_type = CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->ppp_res.res_id = CAM_IFE_PIX_PATH_RES_PPP;
+		ife_csid_hw->ppp_res.res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->ppp_res.hw_intf = ife_csid_hw->hw_intf;
+		path_data = kzalloc(sizeof(*path_data),
+					GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->ppp_res.res_priv = path_data;
+	}
+
+	/* Initialize the RDI resources */
+	for (i = 0; i < ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+				i++) {
+		/* res_id ranges from RDI0 to RDI3 */
+		ife_csid_hw->rdi_res[i].res_type =
+			CAM_ISP_RESOURCE_PIX_PATH;
+		ife_csid_hw->rdi_res[i].res_id = i;
+		ife_csid_hw->rdi_res[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		ife_csid_hw->rdi_res[i].hw_intf = ife_csid_hw->hw_intf;
+
+		path_data = kzalloc(sizeof(*path_data),
+			GFP_KERNEL);
+		if (!path_data) {
+			rc = -ENOMEM;
+			goto err;
+		}
+		ife_csid_hw->rdi_res[i].res_priv = path_data;
+	}
+
+	ife_csid_hw->csid_debug = 0;
+	ife_csid_hw->error_irq_count = 0;
+
+	return 0;
+err:
+	if (rc) {
+		kfree(ife_csid_hw->ipp_res.res_priv);
+		kfree(ife_csid_hw->ppp_res.res_priv);
+		for (i = 0; i <
+			ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+			i++)
+			kfree(ife_csid_hw->rdi_res[i].res_priv);
+
+		for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+			kfree(ife_csid_hw->cid_res[i].res_priv);
+
+	}
+
+	return rc;
+}
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw)
+{
+	int rc = -EINVAL;
+	uint32_t i;
+
+	if (!ife_csid_hw) {
+		CAM_ERR(CAM_ISP, "Invalid param");
+		return rc;
+	}
+
+	/* Release the private data memory from the resources */
+	kfree(ife_csid_hw->ipp_res.res_priv);
+	kfree(ife_csid_hw->ppp_res.res_priv);
+	for (i = 0; i <
+		ife_csid_hw->csid_info->csid_reg->cmn_reg->num_rdis;
+		i++) {
+		kfree(ife_csid_hw->rdi_res[i].res_priv);
+	}
+	for (i = 0; i < CAM_IFE_CSID_CID_RES_MAX; i++)
+		kfree(ife_csid_hw->cid_res[i].res_priv);
+
+	cam_ife_csid_deinit_soc_resources(&ife_csid_hw->hw_info->soc_info);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
new file mode 100644
index 0000000..77f6475
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_core.h
@@ -0,0 +1,480 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_HW_H_
+#define _CAM_IFE_CSID_HW_H_
+
+#include "cam_hw.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_ife_csid_soc.h"
+
+#define CAM_IFE_CSID_HW_RES_MAX      4
+#define CAM_IFE_CSID_CID_RES_MAX     4
+#define CAM_IFE_CSID_RDI_MAX         4
+
+#define CSID_CSI2_RX_INFO_PHY_DL0_EOT_CAPTURED    BIT(0)
+#define CSID_CSI2_RX_INFO_PHY_DL1_EOT_CAPTURED    BIT(1)
+#define CSID_CSI2_RX_INFO_PHY_DL2_EOT_CAPTURED    BIT(2)
+#define CSID_CSI2_RX_INFO_PHY_DL3_EOT_CAPTURED    BIT(3)
+#define CSID_CSI2_RX_INFO_PHY_DL0_SOT_CAPTURED    BIT(4)
+#define CSID_CSI2_RX_INFO_PHY_DL1_SOT_CAPTURED    BIT(5)
+#define CSID_CSI2_RX_INFO_PHY_DL2_SOT_CAPTURED    BIT(6)
+#define CSID_CSI2_RX_INFO_PHY_DL3_SOT_CAPTURED    BIT(7)
+#define CSID_CSI2_RX_INFO_LONG_PKT_CAPTURED       BIT(8)
+#define CSID_CSI2_RX_INFO_SHORT_PKT_CAPTURED      BIT(9)
+#define CSID_CSI2_RX_INFO_CPHY_PKT_HDR_CAPTURED   BIT(10)
+#define CSID_CSI2_RX_ERROR_CPHY_EOT_RECEPTION     BIT(11)
+#define CSID_CSI2_RX_ERROR_CPHY_SOT_RECEPTION     BIT(12)
+#define CSID_CSI2_RX_ERROR_CPHY_PH_CRC            BIT(13)
+#define CSID_CSI2_RX_WARNING_ECC                  BIT(14)
+#define CSID_CSI2_RX_ERROR_LANE0_FIFO_OVERFLOW    BIT(15)
+#define CSID_CSI2_RX_ERROR_LANE1_FIFO_OVERFLOW    BIT(16)
+#define CSID_CSI2_RX_ERROR_LANE2_FIFO_OVERFLOW    BIT(17)
+#define CSID_CSI2_RX_ERROR_LANE3_FIFO_OVERFLOW    BIT(18)
+#define CSID_CSI2_RX_ERROR_CRC                    BIT(19)
+#define CSID_CSI2_RX_ERROR_ECC                    BIT(20)
+#define CSID_CSI2_RX_ERROR_MMAPPED_VC_DT          BIT(21)
+#define CSID_CSI2_RX_ERROR_UNMAPPED_VC_DT         BIT(22)
+#define CSID_CSI2_RX_ERROR_STREAM_UNDERFLOW       BIT(23)
+#define CSID_CSI2_RX_ERROR_UNBOUNDED_FRAME        BIT(24)
+#define CSID_CSI2_RX_INFO_TG_DONE                 BIT(25)
+#define CSID_CSI2_RX_ERROR_TG_FIFO_OVERFLOW       BIT(26)
+#define CSID_CSI2_RX_INFO_RST_DONE                BIT(27)
+
+#define CSID_PATH_INFO_RST_DONE                   BIT(1)
+#define CSID_PATH_ERROR_FIFO_OVERFLOW             BIT(2)
+#define CSID_PATH_INFO_SUBSAMPLED_EOF             BIT(3)
+#define CSID_PATH_INFO_SUBSAMPLED_SOF             BIT(4)
+#define CSID_PATH_INFO_FRAME_DROP_EOF             BIT(5)
+#define CSID_PATH_INFO_FRAME_DROP_EOL             BIT(6)
+#define CSID_PATH_INFO_FRAME_DROP_SOL             BIT(7)
+#define CSID_PATH_INFO_FRAME_DROP_SOF             BIT(8)
+#define CSID_PATH_INFO_INPUT_EOF                  BIT(9)
+#define CSID_PATH_INFO_INPUT_EOL                  BIT(10)
+#define CSID_PATH_INFO_INPUT_SOL                  BIT(11)
+#define CSID_PATH_INFO_INPUT_SOF                  BIT(12)
+#define CSID_PATH_ERROR_PIX_COUNT                 BIT(13)
+#define CSID_PATH_ERROR_LINE_COUNT                BIT(14)
+
+/*
+ * Debug mask values enable the corresponding interrupts, and the matching
+ * debug logs provide the necessary information
+ */
+#define CSID_DEBUG_ENABLE_SOF_IRQ                 BIT(0)
+#define CSID_DEBUG_ENABLE_EOF_IRQ                 BIT(1)
+#define CSID_DEBUG_ENABLE_SOT_IRQ                 BIT(2)
+#define CSID_DEBUG_ENABLE_EOT_IRQ                 BIT(3)
+#define CSID_DEBUG_ENABLE_SHORT_PKT_CAPTURE       BIT(4)
+#define CSID_DEBUG_ENABLE_LONG_PKT_CAPTURE        BIT(5)
+#define CSID_DEBUG_ENABLE_CPHY_PKT_CAPTURE        BIT(6)
+#define CSID_DEBUG_ENABLE_HBI_VBI_INFO            BIT(7)
+#define CSID_DEBUG_DISABLE_EARLY_EOF              BIT(8)
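+
+/*
+ * Illustrative sketch (not part of the driver): csid_debug is a plain
+ * bitmask, so a debugfs write or an internal command handler could enable
+ * SOF/EOF logging with something like
+ *
+ *	csid_hw->csid_debug = CSID_DEBUG_ENABLE_SOF_IRQ |
+ *		CSID_DEBUG_ENABLE_EOF_IRQ;
+ *
+ * after which the IRQ handler emits the matching rate-limited info logs
+ * whenever the corresponding path interrupts fire.
+ */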
+
+/* enum cam_csid_path_halt_mode - selects the path halt mode control */
+enum cam_csid_path_halt_mode {
+	CSID_HALT_MODE_INTERNAL,
+	CSID_HALT_MODE_GLOBAL,
+	CSID_HALT_MODE_MASTER,
+	CSID_HALT_MODE_SLAVE,
+};
+
+/**
+ * enum cam_csid_path_timestamp_stb_sel - selects the sof/eof strobes used to
+ *        capture the timestamp
+ */
+enum cam_csid_path_timestamp_stb_sel {
+	CSID_TIMESTAMP_STB_PRE_HALT,
+	CSID_TIMESTAMP_STB_POST_HALT,
+	CSID_TIMESTAMP_STB_POST_IRQ,
+	CSID_TIMESTAMP_STB_MAX,
+};
+
+struct cam_ife_csid_pxl_reg_offset {
+	/* Pixel path register offsets */
+	uint32_t csid_pxl_irq_status_addr;
+	uint32_t csid_pxl_irq_mask_addr;
+	uint32_t csid_pxl_irq_clear_addr;
+	uint32_t csid_pxl_irq_set_addr;
+
+	uint32_t csid_pxl_cfg0_addr;
+	uint32_t csid_pxl_cfg1_addr;
+	uint32_t csid_pxl_ctrl_addr;
+	uint32_t csid_pxl_frm_drop_pattern_addr;
+	uint32_t csid_pxl_frm_drop_period_addr;
+	uint32_t csid_pxl_irq_subsample_pattern_addr;
+	uint32_t csid_pxl_irq_subsample_period_addr;
+	uint32_t csid_pxl_hcrop_addr;
+	uint32_t csid_pxl_vcrop_addr;
+	uint32_t csid_pxl_pix_drop_pattern_addr;
+	uint32_t csid_pxl_pix_drop_period_addr;
+	uint32_t csid_pxl_line_drop_pattern_addr;
+	uint32_t csid_pxl_line_drop_period_addr;
+	uint32_t csid_pxl_rst_strobes_addr;
+	uint32_t csid_pxl_status_addr;
+	uint32_t csid_pxl_misr_val_addr;
+	uint32_t csid_pxl_format_measure_cfg0_addr;
+	uint32_t csid_pxl_format_measure_cfg1_addr;
+	uint32_t csid_pxl_format_measure0_addr;
+	uint32_t csid_pxl_format_measure1_addr;
+	uint32_t csid_pxl_format_measure2_addr;
+	uint32_t csid_pxl_timestamp_curr0_sof_addr;
+	uint32_t csid_pxl_timestamp_curr1_sof_addr;
+	uint32_t csid_pxl_timestamp_perv0_sof_addr;
+	uint32_t csid_pxl_timestamp_perv1_sof_addr;
+	uint32_t csid_pxl_timestamp_curr0_eof_addr;
+	uint32_t csid_pxl_timestamp_curr1_eof_addr;
+	uint32_t csid_pxl_timestamp_perv0_eof_addr;
+	uint32_t csid_pxl_timestamp_perv1_eof_addr;
+
+	/* configuration */
+	uint32_t  pix_store_en_shift_val;
+	uint32_t  early_eof_en_shift_val;
+};
+
+struct cam_ife_csid_rdi_reg_offset {
+	uint32_t csid_rdi_irq_status_addr;
+	uint32_t csid_rdi_irq_mask_addr;
+	uint32_t csid_rdi_irq_clear_addr;
+	uint32_t csid_rdi_irq_set_addr;
+
+	/* RDI N register addresses */
+	uint32_t csid_rdi_cfg0_addr;
+	uint32_t csid_rdi_cfg1_addr;
+	uint32_t csid_rdi_ctrl_addr;
+	uint32_t csid_rdi_frm_drop_pattern_addr;
+	uint32_t csid_rdi_frm_drop_period_addr;
+	uint32_t csid_rdi_irq_subsample_pattern_addr;
+	uint32_t csid_rdi_irq_subsample_period_addr;
+	uint32_t csid_rdi_rpp_hcrop_addr;
+	uint32_t csid_rdi_rpp_vcrop_addr;
+	uint32_t csid_rdi_rpp_pix_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_pix_drop_period_addr;
+	uint32_t csid_rdi_rpp_line_drop_pattern_addr;
+	uint32_t csid_rdi_rpp_line_drop_period_addr;
+	uint32_t csid_rdi_yuv_chroma_conversion_addr;
+	uint32_t csid_rdi_rst_strobes_addr;
+	uint32_t csid_rdi_status_addr;
+	uint32_t csid_rdi_misr_val0_addr;
+	uint32_t csid_rdi_misr_val1_addr;
+	uint32_t csid_rdi_misr_val2_addr;
+	uint32_t csid_rdi_misr_val3_addr;
+	uint32_t csid_rdi_format_measure_cfg0_addr;
+	uint32_t csid_rdi_format_measure_cfg1_addr;
+	uint32_t csid_rdi_format_measure0_addr;
+	uint32_t csid_rdi_format_measure1_addr;
+	uint32_t csid_rdi_format_measure2_addr;
+	uint32_t csid_rdi_timestamp_curr0_sof_addr;
+	uint32_t csid_rdi_timestamp_curr1_sof_addr;
+	uint32_t csid_rdi_timestamp_prev0_sof_addr;
+	uint32_t csid_rdi_timestamp_prev1_sof_addr;
+	uint32_t csid_rdi_timestamp_curr0_eof_addr;
+	uint32_t csid_rdi_timestamp_curr1_eof_addr;
+	uint32_t csid_rdi_timestamp_prev0_eof_addr;
+	uint32_t csid_rdi_timestamp_prev1_eof_addr;
+	uint32_t csid_rdi_byte_cntr_ping_addr;
+	uint32_t csid_rdi_byte_cntr_pong_addr;
+};
+
+struct cam_ife_csid_csi2_rx_reg_offset {
+	uint32_t csid_csi2_rx_irq_status_addr;
+	uint32_t csid_csi2_rx_irq_mask_addr;
+	uint32_t csid_csi2_rx_irq_clear_addr;
+	uint32_t csid_csi2_rx_irq_set_addr;
+	uint32_t csid_csi2_rx_cfg0_addr;
+	uint32_t csid_csi2_rx_cfg1_addr;
+	uint32_t csid_csi2_rx_capture_ctrl_addr;
+	uint32_t csid_csi2_rx_rst_strobes_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg0_addr;
+	uint32_t csid_csi2_rx_de_scramble_cfg1_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr;
+	uint32_t csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_short_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_0_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_1_addr;
+	uint32_t csid_csi2_rx_captured_long_pkt_ftr_addr;
+	uint32_t csid_csi2_rx_captured_cphy_pkt_hdr_addr;
+	uint32_t csid_csi2_rx_lane0_misr_addr;
+	uint32_t csid_csi2_rx_lane1_misr_addr;
+	uint32_t csid_csi2_rx_lane2_misr_addr;
+	uint32_t csid_csi2_rx_lane3_misr_addr;
+	uint32_t csid_csi2_rx_total_pkts_rcvd_addr;
+	uint32_t csid_csi2_rx_stats_ecc_addr;
+	uint32_t csid_csi2_rx_total_crc_err_addr;
+
+	/* configurations */
+	uint32_t csi2_rst_srb_all;
+	uint32_t csi2_rst_done_shift_val;
+	uint32_t csi2_irq_mask_all;
+	uint32_t csi2_misr_enable_shift_val;
+	uint32_t csi2_vc_mode_shift_val;
+	uint32_t csi2_capture_long_pkt_en_shift;
+	uint32_t csi2_capture_short_pkt_en_shift;
+	uint32_t csi2_capture_cphy_pkt_en_shift;
+	uint32_t csi2_capture_long_pkt_dt_shift;
+	uint32_t csi2_capture_long_pkt_vc_shift;
+	uint32_t csi2_capture_short_pkt_vc_shift;
+	uint32_t csi2_capture_cphy_pkt_dt_shift;
+	uint32_t csi2_capture_cphy_pkt_vc_shift;
+};
+
+struct cam_ife_csid_csi2_tpg_reg_offset {
+	uint32_t csid_tpg_ctrl_addr;
+	uint32_t csid_tpg_vc_cfg0_addr;
+	uint32_t csid_tpg_vc_cfg1_addr;
+	uint32_t csid_tpg_lfsr_seed_addr;
+	uint32_t csid_tpg_dt_n_cfg_0_addr;
+	uint32_t csid_tpg_dt_n_cfg_1_addr;
+	uint32_t csid_tpg_dt_n_cfg_2_addr;
+	uint32_t csid_tpg_color_bars_cfg_addr;
+	uint32_t csid_tpg_color_box_cfg_addr;
+	uint32_t csid_tpg_common_gen_cfg_addr;
+	uint32_t csid_tpg_cgen_n_cfg_addr;
+	uint32_t csid_tpg_cgen_n_x0_addr;
+	uint32_t csid_tpg_cgen_n_x1_addr;
+	uint32_t csid_tpg_cgen_n_x2_addr;
+	uint32_t csid_tpg_cgen_n_xy_addr;
+	uint32_t csid_tpg_cgen_n_y1_addr;
+	uint32_t csid_tpg_cgen_n_y2_addr;
+
+	/* configurations */
+	uint32_t tpg_dtn_cfg_offset;
+	uint32_t tpg_cgen_cfg_offset;
+	uint32_t tpg_cpas_ife_reg_offset;
+};
+
+struct cam_ife_csid_common_reg_offset {
+	/* MIPI CSID registers */
+	uint32_t csid_hw_version_addr;
+	uint32_t csid_cfg0_addr;
+	uint32_t csid_ctrl_addr;
+	uint32_t csid_reset_addr;
+	uint32_t csid_rst_strobes_addr;
+
+	uint32_t csid_test_bus_ctrl_addr;
+	uint32_t csid_top_irq_status_addr;
+	uint32_t csid_top_irq_mask_addr;
+	uint32_t csid_top_irq_clear_addr;
+	uint32_t csid_top_irq_set_addr;
+	uint32_t csid_irq_cmd_addr;
+
+	/* configurations */
+	uint32_t major_version;
+	uint32_t minor_version;
+	uint32_t version_incr;
+	uint32_t num_rdis;
+	uint32_t num_pix;
+	uint32_t num_ppp;
+	uint32_t csid_rst_stb;
+	uint32_t csid_rst_stb_sw_all;
+	uint32_t path_rst_stb_all;
+	uint32_t path_rst_done_shift_val;
+	uint32_t path_en_shift_val;
+	uint32_t dt_id_shift_val;
+	uint32_t vc_shift_val;
+	uint32_t dt_shift_val;
+	uint32_t fmt_shift_val;
+	uint32_t plain_fmt_shit_val;
+	uint32_t crop_v_en_shift_val;
+	uint32_t crop_h_en_shift_val;
+	uint32_t crop_shift;
+	uint32_t ipp_irq_mask_all;
+	uint32_t rdi_irq_mask_all;
+	uint32_t ppp_irq_mask_all;
+	uint32_t measure_en_hbi_vbi_cnt_mask;
+	uint32_t format_measure_en_val;
+};
+
+/**
+ * struct cam_ife_csid_reg_offset - CSID instance register info
+ *
+ * @cmn_reg:  csid common registers info
+ * @csi2_reg: csi2 rx register offset information
+ * @ipp_reg:  ipp register offset information
+ * @ppp_reg:  ppp register offset information
+ * @rdi_reg:  rdi register offset information
+ * @tpg_reg:  tpg register offset information
+ *
+ */
+struct cam_ife_csid_reg_offset {
+	const struct cam_ife_csid_common_reg_offset   *cmn_reg;
+	const struct cam_ife_csid_csi2_rx_reg_offset  *csi2_reg;
+	const struct cam_ife_csid_pxl_reg_offset      *ipp_reg;
+	const struct cam_ife_csid_pxl_reg_offset      *ppp_reg;
+	const struct cam_ife_csid_rdi_reg_offset *rdi_reg[CAM_IFE_CSID_RDI_MAX];
+	const struct cam_ife_csid_csi2_tpg_reg_offset *tpg_reg;
+};
+
+/**
+ * struct cam_ife_csid_hw_info - CSID HW info
+ *
+ * @csid_reg:        csid register offsets
+ * @hw_dts_version:  HW DTS version
+ * @csid_max_clk:    maximum csid clock
+ *
+ */
+struct cam_ife_csid_hw_info {
+	const struct cam_ife_csid_reg_offset   *csid_reg;
+	uint32_t                                hw_dts_version;
+	uint32_t                                csid_max_clk;
+};
+
+/**
+ * struct cam_ife_csid_csi2_rx_cfg - csid csi2 rx configuration data
+ * @phy_sel:     input resource type for sensor only
+ * @lane_type:   lane type: c-phy or d-phy
+ * @lane_num:    active lane number
+ * @lane_cfg:    lane configurations: 4 bits per lane
+ *
+ */
+struct cam_ife_csid_csi2_rx_cfg {
+	uint32_t                        phy_sel;
+	uint32_t                        lane_type;
+	uint32_t                        lane_num;
+	uint32_t                        lane_cfg;
+};
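+
+/*
+ * Illustrative sketch (assumed encoding, not taken from this header): with
+ * 4 bits per lane in @lane_cfg, a 4-lane sensor wired in order might be
+ * described as
+ *
+ *	struct cam_ife_csid_csi2_rx_cfg cfg = {
+ *		.lane_num = 4,
+ *		.lane_cfg = 0x3210,
+ *	};
+ *
+ * i.e. physical lane 0 in bits [3:0], physical lane 1 in bits [7:4],
+ * and so on.
+ */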
+
+/**
+ * struct cam_ife_csid_tpg_cfg - csid tpg configuration data
+ * @width:            width
+ * @height:           height
+ * @test_pattern:     pattern
+ * @in_format:        decode format
+ * @usage_type:       whether dual isp is required
+ *
+ */
+struct cam_ife_csid_tpg_cfg {
+	uint32_t                        width;
+	uint32_t                        height;
+	uint32_t                        test_pattern;
+	uint32_t                        in_format;
+	uint32_t                        usage_type;
+};
+
+/**
+ * struct cam_ife_csid_cid_data - cid configuration private data
+ *
+ * @vc:          Virtual channel
+ * @dt:          Data type
+ * @cnt:         Cid resource reference count.
+ * @tpg_set:     Tpg used for this cid resource
+ *
+ */
+struct cam_ife_csid_cid_data {
+	uint32_t                     vc;
+	uint32_t                     dt;
+	uint32_t                     cnt;
+	uint32_t                     tpg_set;
+};
+
+/**
+ * struct cam_ife_csid_path_cfg - csid path configuration details. It is stored
+ *                          as private data for the IPP/RDI paths
+ * @vc:             Virtual channel number
+ * @dt:             Data type number
+ * @cid:            cid number, same as the DT_ID number in HW
+ * @in_format:      input decode format
+ * @out_format:     output format
+ * @crop_enable:    crop is enabled or disabled; if enabled,
+ *                  the remaining parameters are valid.
+ * @start_pixel:    start pixel
+ * @end_pixel:      end pixel
+ * @width:          width
+ * @start_line:     start line
+ * @end_line:       end line
+ * @height:         height
+ * @sync_mode:      Applicable for IPP/RDI path reservation:
+ *                  reserve the path as master IPP (set value 1) or
+ *                  slave IPP (set value 2);
+ *                  for RDI, set the mode to none
+ * @master_idx:     For a slave reservation, give the master IFE instance
+ *                  index; the slave synchronizes with the master's start
+ *                  and stop operations
+ * @clk_rate:       Clock rate
+ *
+ */
+struct cam_ife_csid_path_cfg {
+	uint32_t                        vc;
+	uint32_t                        dt;
+	uint32_t                        cid;
+	uint32_t                        in_format;
+	uint32_t                        out_format;
+	bool                            crop_enable;
+	uint32_t                        start_pixel;
+	uint32_t                        end_pixel;
+	uint32_t                        width;
+	uint32_t                        start_line;
+	uint32_t                        end_line;
+	uint32_t                        height;
+	enum cam_isp_hw_sync_mode       sync_mode;
+	uint32_t                        master_idx;
+	uint64_t                        clk_rate;
+};
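+
+/*
+ * Illustrative sketch (assuming the cam_isp_hw_sync_mode enumerators
+ * CAM_ISP_HW_SYNC_MASTER/SLAVE/NONE match the values described above):
+ * a dual-IFE reservation could configure
+ *
+ *	master_path->sync_mode = CAM_ISP_HW_SYNC_MASTER;
+ *	slave_path->sync_mode  = CAM_ISP_HW_SYNC_SLAVE;
+ *	slave_path->master_idx = 0;
+ *
+ * where 0 is the hypothetical index of the master IFE instance, while a
+ * standalone RDI path keeps sync_mode at CAM_ISP_HW_SYNC_NONE.
+ */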
+
+/**
+ * struct cam_ife_csid_hw - csid hw device resources data
+ *
+ * @hw_intf:                  contains the csid hw interface information
+ * @hw_info:                  csid hw device information
+ * @csid_info:                csid hw specific information
+ * @res_type:                 CSID in resource type
+ * @csi2_rx_cfg:              Csi2 rx decoder configuration for csid
+ * @tpg_cfg:                  TPG configuration
+ * @csi2_reserve_cnt:         CSI2 reservations count value
+ * @csi2_cfg_cnt:             csi2 configuration count
+ * @tpg_start_cnt:            tpg start count
+ * @ipp_res:                  image pixel path resource
+ * @ppp_res:                  phase pixel path resource
+ * @rdi_res:                  raw dump image path resources
+ * @cid_res:                  cid resources state
+ * @csid_top_complete:        csid top reset completion
+ * @csid_csi2_complete:       csi2 reset completion
+ * @csid_ipp_complete:        ipp reset completion
+ * @csid_ppp_complete:        ppp reset completion
+ * @csid_rdin_complete:       per-RDI reset completion
+ * @csid_debug:               csid debug information to enable the SOT, EOT,
+ *                            SOF, EOF, measure etc. in the csid hw
+ * @clk_rate:                 Clock rate
+ * @sof_irq_triggered:        Flag set on receiving the event to enable the SOF
+ *                            IRQ in case of a SOF freeze.
+ * @irq_debug_cnt:            Counter to track SOF IRQs when the above flag is
+ *                            set.
+ * @error_irq_count:          Error IRQ count; if continuous error IRQs arrive,
+ *                            the CSID needs to be stopped and its interrupts
+ *                            masked.
+ *
+ */
+struct cam_ife_csid_hw {
+	struct cam_hw_intf              *hw_intf;
+	struct cam_hw_info              *hw_info;
+	struct cam_ife_csid_hw_info     *csid_info;
+	uint32_t                         res_type;
+	struct cam_ife_csid_csi2_rx_cfg  csi2_rx_cfg;
+	struct cam_ife_csid_tpg_cfg      tpg_cfg;
+	uint32_t                         csi2_reserve_cnt;
+	uint32_t                         csi2_cfg_cnt;
+	uint32_t                         tpg_start_cnt;
+	struct cam_isp_resource_node     ipp_res;
+	struct cam_isp_resource_node     ppp_res;
+	struct cam_isp_resource_node     rdi_res[CAM_IFE_CSID_RDI_MAX];
+	struct cam_isp_resource_node     cid_res[CAM_IFE_CSID_CID_RES_MAX];
+	struct completion                csid_top_complete;
+	struct completion                csid_csi2_complete;
+	struct completion                csid_ipp_complete;
+	struct completion                csid_ppp_complete;
+	struct completion    csid_rdin_complete[CAM_IFE_CSID_RDI_MAX];
+	uint64_t                         csid_debug;
+	uint64_t                         clk_rate;
+	bool                             sof_irq_triggered;
+	uint32_t                         irq_debug_cnt;
+	uint32_t                         error_irq_count;
+};
+
+int cam_ife_csid_hw_probe_init(struct cam_hw_intf  *csid_hw_intf,
+	uint32_t csid_idx);
+
+int cam_ife_csid_hw_deinit(struct cam_ife_csid_hw *ife_csid_hw);
+
+#endif /* _CAM_IFE_CSID_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
new file mode 100644
index 0000000..ec32d57
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+#include "cam_ife_csid_hw_intf.h"
+#include "cam_debug_util.h"
+
+static struct cam_hw_intf *cam_ife_csid_hw_list[CAM_IFE_CSID_HW_RES_MAX];
+
+static char csid_dev_name[8];
+
+int cam_ife_csid_probe(struct platform_device *pdev)
+{
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	const struct of_device_id      *match_dev = NULL;
+	struct cam_ife_csid_hw_info    *csid_hw_data = NULL;
+	uint32_t                        csid_dev_idx;
+	int                             rc = 0;
+
+	CAM_DBG(CAM_ISP, "probe called");
+
+	csid_hw_intf = kzalloc(sizeof(*csid_hw_intf), GFP_KERNEL);
+	if (!csid_hw_intf) {
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	csid_hw_info = kzalloc(sizeof(*csid_hw_info), GFP_KERNEL);
+	if (!csid_hw_info) {
+		rc = -ENOMEM;
+		goto free_hw_intf;
+	}
+
+	csid_dev = kzalloc(sizeof(*csid_dev), GFP_KERNEL);
+	if (!csid_dev) {
+		rc = -ENOMEM;
+		goto free_hw_info;
+	}
+
+	/* get ife csid hw index */
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index",
+		&csid_dev_idx);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to read cell-index, rc=%d", rc);
+		goto free_dev;
+	}
+
+	/* get ife csid hw information */
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ISP, "No matching table for the IFE CSID HW!");
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	memset(csid_dev_name, 0, sizeof(csid_dev_name));
+	snprintf(csid_dev_name, sizeof(csid_dev_name),
+		"csid%1u", csid_dev_idx);
+
+	csid_hw_intf->hw_idx = csid_dev_idx;
+	csid_hw_intf->hw_type = CAM_ISP_HW_TYPE_IFE_CSID;
+	csid_hw_intf->hw_priv = csid_hw_info;
+
+	csid_hw_info->core_info = csid_dev;
+	csid_hw_info->soc_info.pdev = pdev;
+	csid_hw_info->soc_info.dev = &pdev->dev;
+	csid_hw_info->soc_info.dev_name = csid_dev_name;
+	csid_hw_info->soc_info.index = csid_dev_idx;
+
+	csid_hw_data = (struct cam_ife_csid_hw_info  *)match_dev->data;
+	/* need to set up the pdev before calling the ife hw probe init */
+	csid_dev->csid_info = csid_hw_data;
+
+	rc = cam_ife_csid_hw_probe_init(csid_hw_intf, csid_dev_idx);
+	if (rc)
+		goto free_dev;
+
+	platform_set_drvdata(pdev, csid_dev);
+	CAM_DBG(CAM_ISP, "CSID:%d probe successful",
+		csid_hw_intf->hw_idx);
+
+	if (csid_hw_intf->hw_idx < CAM_IFE_CSID_HW_RES_MAX) {
+		cam_ife_csid_hw_list[csid_hw_intf->hw_idx] = csid_hw_intf;
+	} else {
+		rc = -EINVAL;
+		goto free_dev;
+	}
+
+	return 0;
+
+free_dev:
+	kfree(csid_dev);
+free_hw_info:
+	kfree(csid_hw_info);
+free_hw_intf:
+	kfree(csid_hw_intf);
+err:
+	return rc;
+}
+
+int cam_ife_csid_remove(struct platform_device *pdev)
+{
+	struct cam_ife_csid_hw         *csid_dev = NULL;
+	struct cam_hw_intf             *csid_hw_intf;
+	struct cam_hw_info             *csid_hw_info;
+
+	csid_dev = (struct cam_ife_csid_hw *)platform_get_drvdata(pdev);
+	csid_hw_intf = csid_dev->hw_intf;
+	csid_hw_info = csid_dev->hw_info;
+
+	CAM_DBG(CAM_ISP, "CSID:%d remove",
+		csid_dev->hw_intf->hw_idx);
+
+	cam_ife_csid_hw_deinit(csid_dev);
+
+	/* Release the csid device memory */
+	kfree(csid_dev);
+	kfree(csid_hw_info);
+	kfree(csid_hw_intf);
+	return 0;
+}
+
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (hw_idx < CAM_IFE_CSID_HW_RES_MAX && cam_ife_csid_hw_list[hw_idx]) {
+		*ife_csid_hw = cam_ife_csid_hw_list[hw_idx];
+	} else {
+		*ife_csid_hw = NULL;
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
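+
+/*
+ * Illustrative caller-side sketch (assuming the generic cam_hw_ops
+ * signatures): a hw manager can look up a probed CSID instance by index
+ * and bail out when that slot never probed:
+ *
+ *	struct cam_ife_csid_hw_caps csid_caps;
+ *	struct cam_hw_intf *csid_intf = NULL;
+ *
+ *	if (cam_ife_csid_hw_init(&csid_intf, 0))
+ *		return -ENODEV;
+ *	csid_intf->hw_ops.get_hw_caps(csid_intf->hw_priv,
+ *		&csid_caps, sizeof(csid_caps));
+ */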
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
new file mode 100644
index 0000000..f4923ba
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_dev.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_DEV_H_
+#define _CAM_IFE_CSID_DEV_H_
+
+#include "cam_isp_hw.h"
+
+irqreturn_t cam_ife_csid_irq(int irq_num, void *data);
+
+int cam_ife_csid_probe(struct platform_device *pdev);
+int cam_ife_csid_remove(struct platform_device *pdev);
+
+#endif /* _CAM_IFE_CSID_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c
new file mode 100644
index 0000000..2630452
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include "cam_ife_csid_lite17x.h"
+#include "cam_ife_csid_core.h"
+#include "cam_ife_csid_dev.h"
+
+#define CAM_CSID_LITE_DRV_NAME                    "csid_lite"
+
+static struct cam_ife_csid_hw_info cam_ife_csid_lite_hw_info = {
+	.csid_reg = &cam_ife_csid_lite_17x_reg_offset,
+};
+
+static const struct of_device_id cam_ife_csid_lite_dt_match[] = {
+	{
+		.compatible = "qcom,csid-lite170",
+		.data = &cam_ife_csid_lite_hw_info,
+	},
+	{
+		.compatible = "qcom,csid-lite175",
+		.data = &cam_ife_csid_lite_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_ife_csid_lite_dt_match);
+
+static struct platform_driver cam_ife_csid_lite_driver = {
+	.probe = cam_ife_csid_probe,
+	.remove = cam_ife_csid_remove,
+	.driver = {
+		.name = CAM_CSID_LITE_DRV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ife_csid_lite_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_ife_csid_lite_init_module(void)
+{
+	return platform_driver_register(&cam_ife_csid_lite_driver);
+}
+
+static void __exit cam_ife_csid_lite_exit_module(void)
+{
+	platform_driver_unregister(&cam_ife_csid_lite_driver);
+}
+
+module_init(cam_ife_csid_lite_init_module);
+module_exit(cam_ife_csid_lite_exit_module);
+MODULE_DESCRIPTION("CAM IFE_CSID_LITE driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h
new file mode 100644
index 0000000..49a427b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_lite17x.h
@@ -0,0 +1,313 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_LITE17X_H_
+#define _CAM_IFE_CSID_LITE17X_H_
+#include "cam_ife_csid_core.h"
+
+static const struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_17x_rdi_0_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x30,
+	.csid_rdi_irq_mask_addr                   = 0x34,
+	.csid_rdi_irq_clear_addr                  = 0x38,
+	.csid_rdi_irq_set_addr                    = 0x3c,
+	.csid_rdi_cfg0_addr                       = 0x200,
+	.csid_rdi_cfg1_addr                       = 0x204,
+	.csid_rdi_ctrl_addr                       = 0x208,
+	.csid_rdi_frm_drop_pattern_addr           = 0x20c,
+	.csid_rdi_frm_drop_period_addr            = 0x210,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x214,
+	.csid_rdi_irq_subsample_period_addr       = 0x218,
+	.csid_rdi_rpp_hcrop_addr                  = 0x21c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x220,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x224,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x228,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x22c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x230,
+	.csid_rdi_rst_strobes_addr                = 0x240,
+	.csid_rdi_status_addr                     = 0x250,
+	.csid_rdi_misr_val0_addr                  = 0x254,
+	.csid_rdi_misr_val1_addr                  = 0x258,
+	.csid_rdi_misr_val2_addr                  = 0x25c,
+	.csid_rdi_misr_val3_addr                  = 0x260,
+	.csid_rdi_format_measure_cfg0_addr        = 0x270,
+	.csid_rdi_format_measure_cfg1_addr        = 0x274,
+	.csid_rdi_format_measure0_addr            = 0x278,
+	.csid_rdi_format_measure1_addr            = 0x27c,
+	.csid_rdi_format_measure2_addr            = 0x280,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x290,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x294,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x298,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x29c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x2a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x2a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x2a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x2ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x2e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x2e4,
+};
+
+static const struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_17x_rdi_1_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x40,
+	.csid_rdi_irq_mask_addr                   = 0x44,
+	.csid_rdi_irq_clear_addr                  = 0x48,
+	.csid_rdi_irq_set_addr                    = 0x4c,
+	.csid_rdi_cfg0_addr                       = 0x300,
+	.csid_rdi_cfg1_addr                       = 0x304,
+	.csid_rdi_ctrl_addr                       = 0x308,
+	.csid_rdi_frm_drop_pattern_addr           = 0x30c,
+	.csid_rdi_frm_drop_period_addr            = 0x310,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x314,
+	.csid_rdi_irq_subsample_period_addr       = 0x318,
+	.csid_rdi_rpp_hcrop_addr                  = 0x31c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x320,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x324,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x328,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x32c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x330,
+	.csid_rdi_rst_strobes_addr                = 0x340,
+	.csid_rdi_status_addr                     = 0x350,
+	.csid_rdi_misr_val0_addr                  = 0x354,
+	.csid_rdi_misr_val1_addr                  = 0x358,
+	.csid_rdi_misr_val2_addr                  = 0x35c,
+	.csid_rdi_misr_val3_addr                  = 0x360,
+	.csid_rdi_format_measure_cfg0_addr        = 0x370,
+	.csid_rdi_format_measure_cfg1_addr        = 0x374,
+	.csid_rdi_format_measure0_addr            = 0x378,
+	.csid_rdi_format_measure1_addr            = 0x37c,
+	.csid_rdi_format_measure2_addr            = 0x380,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x390,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x394,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x398,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x39c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x3a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x3a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x3a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x3ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x3e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x3e4,
+};
+
+static const struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_17x_rdi_2_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x50,
+	.csid_rdi_irq_mask_addr                   = 0x54,
+	.csid_rdi_irq_clear_addr                  = 0x58,
+	.csid_rdi_irq_set_addr                    = 0x5c,
+	.csid_rdi_cfg0_addr                       = 0x400,
+	.csid_rdi_cfg1_addr                       = 0x404,
+	.csid_rdi_ctrl_addr                       = 0x408,
+	.csid_rdi_frm_drop_pattern_addr           = 0x40c,
+	.csid_rdi_frm_drop_period_addr            = 0x410,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x414,
+	.csid_rdi_irq_subsample_period_addr       = 0x418,
+	.csid_rdi_rpp_hcrop_addr                  = 0x41c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x420,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x424,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x428,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x42c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x430,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x434,
+	.csid_rdi_rst_strobes_addr                = 0x440,
+	.csid_rdi_status_addr                     = 0x450,
+	.csid_rdi_misr_val0_addr                  = 0x454,
+	.csid_rdi_misr_val1_addr                  = 0x458,
+	.csid_rdi_misr_val2_addr                  = 0x45c,
+	.csid_rdi_misr_val3_addr                  = 0x460,
+	.csid_rdi_format_measure_cfg0_addr        = 0x470,
+	.csid_rdi_format_measure_cfg1_addr        = 0x474,
+	.csid_rdi_format_measure0_addr            = 0x478,
+	.csid_rdi_format_measure1_addr            = 0x47c,
+	.csid_rdi_format_measure2_addr            = 0x480,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x490,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x494,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x498,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x49c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x4a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x4a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x4a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x4ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x4e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x4e4,
+};
+
+static const struct cam_ife_csid_rdi_reg_offset
+	cam_ife_csid_lite_17x_rdi_3_reg_offset = {
+
+	.csid_rdi_irq_status_addr                 = 0x60,
+	.csid_rdi_irq_mask_addr                   = 0x64,
+	.csid_rdi_irq_clear_addr                  = 0x68,
+	.csid_rdi_irq_set_addr                    = 0x6c,
+	.csid_rdi_cfg0_addr                       = 0x500,
+	.csid_rdi_cfg1_addr                       = 0x504,
+	.csid_rdi_ctrl_addr                       = 0x508,
+	.csid_rdi_frm_drop_pattern_addr           = 0x50c,
+	.csid_rdi_frm_drop_period_addr            = 0x510,
+	.csid_rdi_irq_subsample_pattern_addr      = 0x514,
+	.csid_rdi_irq_subsample_period_addr       = 0x518,
+	.csid_rdi_rpp_hcrop_addr                  = 0x51c,
+	.csid_rdi_rpp_vcrop_addr                  = 0x520,
+	.csid_rdi_rpp_pix_drop_pattern_addr       = 0x524,
+	.csid_rdi_rpp_pix_drop_period_addr        = 0x528,
+	.csid_rdi_rpp_line_drop_pattern_addr      = 0x52c,
+	.csid_rdi_rpp_line_drop_period_addr       = 0x530,
+	.csid_rdi_yuv_chroma_conversion_addr      = 0x534,
+	.csid_rdi_rst_strobes_addr                = 0x540,
+	.csid_rdi_status_addr                     = 0x550,
+	.csid_rdi_misr_val0_addr                  = 0x554,
+	.csid_rdi_misr_val1_addr                  = 0x558,
+	.csid_rdi_misr_val2_addr                  = 0x55c,
+	.csid_rdi_misr_val3_addr                  = 0x560,
+	.csid_rdi_format_measure_cfg0_addr        = 0x570,
+	.csid_rdi_format_measure_cfg1_addr        = 0x574,
+	.csid_rdi_format_measure0_addr            = 0x578,
+	.csid_rdi_format_measure1_addr            = 0x57c,
+	.csid_rdi_format_measure2_addr            = 0x580,
+	.csid_rdi_timestamp_curr0_sof_addr        = 0x590,
+	.csid_rdi_timestamp_curr1_sof_addr        = 0x594,
+	.csid_rdi_timestamp_prev0_sof_addr        = 0x598,
+	.csid_rdi_timestamp_prev1_sof_addr        = 0x59c,
+	.csid_rdi_timestamp_curr0_eof_addr        = 0x5a0,
+	.csid_rdi_timestamp_curr1_eof_addr        = 0x5a4,
+	.csid_rdi_timestamp_prev0_eof_addr        = 0x5a8,
+	.csid_rdi_timestamp_prev1_eof_addr        = 0x5ac,
+	.csid_rdi_byte_cntr_ping_addr             = 0x5e0,
+	.csid_rdi_byte_cntr_pong_addr             = 0x5e4,
+};
+
+static const struct cam_ife_csid_csi2_rx_reg_offset
+	cam_ife_csid_lite_17x_csi2_reg_offset = {
+
+	.csid_csi2_rx_irq_status_addr                 = 0x20,
+	.csid_csi2_rx_irq_mask_addr                   = 0x24,
+	.csid_csi2_rx_irq_clear_addr                  = 0x28,
+	.csid_csi2_rx_irq_set_addr                    = 0x2c,
+
+	/* CSI2 rx control */
+	.csid_csi2_rx_cfg0_addr                       = 0x100,
+	.csid_csi2_rx_cfg1_addr                       = 0x104,
+	.csid_csi2_rx_capture_ctrl_addr               = 0x108,
+	.csid_csi2_rx_rst_strobes_addr                = 0x110,
+	.csid_csi2_rx_de_scramble_cfg0_addr           = 0x114,
+	.csid_csi2_rx_de_scramble_cfg1_addr           = 0x118,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_0_addr   = 0x120,
+	.csid_csi2_rx_cap_unmap_long_pkt_hdr_1_addr   = 0x124,
+	.csid_csi2_rx_captured_short_pkt_0_addr       = 0x128,
+	.csid_csi2_rx_captured_short_pkt_1_addr       = 0x12c,
+	.csid_csi2_rx_captured_long_pkt_0_addr        = 0x130,
+	.csid_csi2_rx_captured_long_pkt_1_addr        = 0x134,
+	.csid_csi2_rx_captured_long_pkt_ftr_addr      = 0x138,
+	.csid_csi2_rx_captured_cphy_pkt_hdr_addr      = 0x13c,
+	.csid_csi2_rx_lane0_misr_addr                 = 0x150,
+	.csid_csi2_rx_lane1_misr_addr                 = 0x154,
+	.csid_csi2_rx_lane2_misr_addr                 = 0x158,
+	.csid_csi2_rx_lane3_misr_addr                 = 0x15c,
+	.csid_csi2_rx_total_pkts_rcvd_addr            = 0x160,
+	.csid_csi2_rx_stats_ecc_addr                  = 0x164,
+	.csid_csi2_rx_total_crc_err_addr              = 0x168,
+
+	.csi2_rst_srb_all                             = 0x3FFF,
+	.csi2_rst_done_shift_val                      = 27,
+	.csi2_irq_mask_all                            = 0xFFFFFFF,
+	.csi2_misr_enable_shift_val                   = 6,
+	.csi2_vc_mode_shift_val                       = 2,
+	.csi2_capture_long_pkt_en_shift               = 0,
+	.csi2_capture_short_pkt_en_shift              = 1,
+	.csi2_capture_cphy_pkt_en_shift               = 2,
+	.csi2_capture_long_pkt_dt_shift               = 4,
+	.csi2_capture_long_pkt_vc_shift               = 10,
+	.csi2_capture_short_pkt_vc_shift              = 15,
+	.csi2_capture_cphy_pkt_dt_shift               = 20,
+	.csi2_capture_cphy_pkt_vc_shift               = 26,
+};
+
+static const struct cam_ife_csid_csi2_tpg_reg_offset
+	cam_ife_csid_lite_17x_tpg_reg_offset = {
+
+	/* CSID TPG control */
+	.csid_tpg_ctrl_addr                           = 0x600,
+	.csid_tpg_vc_cfg0_addr                        = 0x604,
+	.csid_tpg_vc_cfg1_addr                        = 0x608,
+	.csid_tpg_lfsr_seed_addr                      = 0x60c,
+	.csid_tpg_dt_n_cfg_0_addr                     = 0x610,
+	.csid_tpg_dt_n_cfg_1_addr                     = 0x614,
+	.csid_tpg_dt_n_cfg_2_addr                     = 0x618,
+	.csid_tpg_color_bars_cfg_addr                 = 0x640,
+	.csid_tpg_color_box_cfg_addr                  = 0x644,
+	.csid_tpg_common_gen_cfg_addr                 = 0x648,
+	.csid_tpg_cgen_n_cfg_addr                     = 0x650,
+	.csid_tpg_cgen_n_x0_addr                      = 0x654,
+	.csid_tpg_cgen_n_x1_addr                      = 0x658,
+	.csid_tpg_cgen_n_x2_addr                      = 0x65c,
+	.csid_tpg_cgen_n_xy_addr                      = 0x660,
+	.csid_tpg_cgen_n_y1_addr                      = 0x664,
+	.csid_tpg_cgen_n_y2_addr                      = 0x668,
+
+	/* configurations */
+	.tpg_dtn_cfg_offset                           = 0xc,
+	.tpg_cgen_cfg_offset                          = 0x20,
+	.tpg_cpas_ife_reg_offset                      = 0x28,
+};
+
+static const struct cam_ife_csid_common_reg_offset
+	cam_csid_lite_17x_cmn_reg_offset = {
+
+	.csid_hw_version_addr                         = 0x0,
+	.csid_cfg0_addr                               = 0x4,
+	.csid_ctrl_addr                               = 0x8,
+	.csid_reset_addr                              = 0xc,
+	.csid_rst_strobes_addr                        = 0x10,
+
+	.csid_test_bus_ctrl_addr                      = 0x14,
+	.csid_top_irq_status_addr                     = 0x70,
+	.csid_top_irq_mask_addr                       = 0x74,
+	.csid_top_irq_clear_addr                      = 0x78,
+	.csid_top_irq_set_addr                        = 0x7c,
+	.csid_irq_cmd_addr                            = 0x80,
+
+	/* configurations */
+	.major_version                                = 1,
+	.minor_version                                = 7,
+	.version_incr                                 = 0,
+	.num_rdis                                     = 4,
+	.num_pix                                      = 0,
+	.csid_rst_stb                                 = 0x1e,
+	.csid_rst_stb_sw_all                          = 0x1f,
+	.path_rst_stb_all                             = 0x7f,
+	.path_rst_done_shift_val                      = 1,
+	.path_en_shift_val                            = 31,
+	.dt_id_shift_val                              = 27,
+	.vc_shift_val                                 = 22,
+	.dt_shift_val                                 = 16,
+	.fmt_shift_val                                = 12,
+	.plain_fmt_shit_val                           = 10,
+	.crop_v_en_shift_val                          = 6,
+	.crop_h_en_shift_val                          = 5,
+	.crop_shift                                   = 16,
+	.ipp_irq_mask_all                             = 0x7FFF,
+	.rdi_irq_mask_all                             = 0x7FFF,
+	.ppp_irq_mask_all                             = 0xFFFF,
+};
+
+static const struct cam_ife_csid_reg_offset cam_ife_csid_lite_17x_reg_offset = {
+	.cmn_reg          = &cam_csid_lite_17x_cmn_reg_offset,
+	.csi2_reg         = &cam_ife_csid_lite_17x_csi2_reg_offset,
+	.ipp_reg          = NULL,
+	.rdi_reg = {
+		&cam_ife_csid_lite_17x_rdi_0_reg_offset,
+		&cam_ife_csid_lite_17x_rdi_1_reg_offset,
+		&cam_ife_csid_lite_17x_rdi_2_reg_offset,
+		&cam_ife_csid_lite_17x_rdi_3_reg_offset,
+	},
+	.tpg_reg = &cam_ife_csid_lite_17x_tpg_reg_offset,
+};
+
+#endif /* _CAM_IFE_CSID_LITE17X_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
new file mode 100644
index 0000000..d38096b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#include <linux/slab.h>
+#include "cam_ife_csid_soc.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static int cam_ife_csid_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+
+	return rc;
+}
+
+static int cam_ife_csid_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler,
+	void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+
+	return rc;
+}
+
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data)
+{
+	int rc = 0;
+	struct cam_cpas_register_params   cpas_register_param;
+	struct cam_csid_soc_private      *soc_private;
+
+	soc_private = kzalloc(sizeof(*soc_private), GFP_KERNEL);
+	if (!soc_private)
+		return -ENOMEM;
+
+	soc_info->soc_private = soc_private;
+
+	rc = cam_ife_csid_get_dt_properties(soc_info);
+	if (rc < 0)
+		goto free_soc_private;
+
+	/* Need to see if we want to post-process the clock list */
+
+	rc = cam_ife_csid_request_platform_resource(soc_info, csid_irq_handler,
+		irq_data);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP,
+			"Error Request platform resources failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "csid",
+		CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = soc_info->dev;
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+		goto release_soc;
+	}
+
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+
+	return rc;
+
+release_soc:
+	cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_ife_csid_deinit_soc_resources(
+	struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -ENODEV;
+	}
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_ife_csid_enable_soc_resources(
+	struct cam_hw_soc_info *soc_info, uint32_t clk_lvl)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+
+	soc_private = soc_info->soc_private;
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	CAM_DBG(CAM_ISP, "csid vote compressed_bw:%lld uncompressed_bw:%lld",
+		axi_vote.compressed_bw, axi_vote.uncompressed_bw);
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS start failed");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		clk_lvl, true);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "enable platform failed");
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	cam_cpas_stop(soc_private->cpas_handle);
+end:
+	return rc;
+}
+
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Disable platform failed");
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error CPAS stop failed rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info  *soc_info,
+	uint32_t cpas_ife_base_offset)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+	uint32_t                           cpas_ife_force_clk_offset;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+
+	soc_private = soc_info->soc_private;
+	cpas_ife_force_clk_offset =
+		cpas_ife_base_offset + (0x4 * soc_info->index);
+	rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+		cpas_ife_force_clk_offset, 1, 1);
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS set IFE:%d force clock on failed",
+			soc_info->index);
+	else
+		CAM_DBG(CAM_ISP, "CPAS set IFE:%d force clock on",
+			soc_info->index);
+
+	return rc;
+}
+
+int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+	uint32_t cpas_ife_base_offset)
+{
+	int rc = 0;
+	struct cam_csid_soc_private       *soc_private;
+	uint32_t                           cpas_ife_force_clk_offset;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error Invalid params");
+		return -EINVAL;
+	}
+
+	soc_private = soc_info->soc_private;
+	cpas_ife_force_clk_offset =
+		cpas_ife_base_offset + (0x4 * soc_info->index);
+	rc = cam_cpas_reg_write(soc_private->cpas_handle, CAM_CPAS_REG_CPASTOP,
+		cpas_ife_force_clk_offset, 1, 0);
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS set IFE:%d force clock off failed",
+			soc_info->index);
+	else
+		CAM_DBG(CAM_ISP, "CPAS set IFE:%d force clock off",
+			soc_info->index);
+
+	return rc;
+}
+
+uint32_t cam_ife_csid_get_vote_level(struct cam_hw_soc_info *soc_info,
+	uint64_t clock_rate)
+{
+	int i = 0;
+
+	if (!clock_rate)
+		return CAM_SVS_VOTE;
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		if (soc_info->clk_rate[i][soc_info->num_clk - 1] >=
+			clock_rate) {
+			CAM_DBG(CAM_ISP,
+				"Clock rate %lld, selected clock level %d",
+				clock_rate, i);
+			return i;
+		}
+	}
+
+	return CAM_TURBO_VOTE;
+}
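+
+/*
+ * Worked example (hypothetical clock table): if the highest-index clock
+ * supports 404 MHz at level 0 (SVS) and 600 MHz at level 1, a request for
+ * 480 MHz fails the level-0 comparison, matches level 1 and returns it,
+ * while a request above 600 MHz falls through to CAM_TURBO_VOTE.
+ */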
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
new file mode 100644
index 0000000..6aa721c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/ife_csid_hw/cam_ife_csid_soc.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IFE_CSID_SOC_H_
+#define _CAM_IFE_CSID_SOC_H_
+
+#include "cam_isp_hw.h"
+
+/**
+ * struct cam_csid_soc_private:
+ *
+ * @brief:                   Private SOC data specific to CSID HW Driver
+ *
+ * @cpas_handle:             Handle returned on registering with CPAS driver.
+ *                           This handle is used for all further interface
+ *                           with CPAS.
+ */
+struct cam_csid_soc_private {
+	uint32_t cpas_handle;
+};
+
+/**
+ * struct csid_device_soc_info - CSID SOC info object
+ *
+ * @csi_vdd_voltage:       csi vdd voltage value
+ *
+ */
+struct csid_device_soc_info {
+	int                             csi_vdd_voltage;
+};
+
+/**
+ * cam_ife_csid_init_soc_resources()
+ *
+ * @brief:                 csid initialization function for the soc info
+ *
+ * @soc_info:              soc info structure pointer
+ * @csid_irq_handler:      irq handler function to be registered
+ * @irq_data:              irq data for the callback function
+ *
+ */
+int cam_ife_csid_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t csid_irq_handler, void *irq_data);
+
+/**
+ * cam_ife_csid_deinit_soc_resources()
+ *
+ * @brief:                 csid de-initialization function for the soc info
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_ife_csid_enable_soc_resources()
+ *
+ * @brief:                 csid soc resource enable function
+ *
+ * @soc_info:              soc info structure pointer
+ * @clk_lvl:               vote level to start with
+ *
+ */
+int cam_ife_csid_enable_soc_resources(struct cam_hw_soc_info  *soc_info,
+	uint32_t clk_lvl);
+
+/**
+ * cam_ife_csid_disable_soc_resources()
+ *
+ * @brief:                 csid soc resource disable function
+ *
+ * @soc_info:              soc info structure pointer
+ *
+ */
+int cam_ife_csid_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_ife_csid_enable_ife_force_clock_on()
+ *
+ * @brief:                 if the csid testgen is used for the dual isp case,
+ *                         enable the ife force clock on through cpas before
+ *                         starting the csid test generator
+ *
+ * @soc_info:              soc info structure pointer
+ * @cpas_ife_base_offset:  cpas ife force clock base reg offset value
+ *
+ */
+int cam_ife_csid_enable_ife_force_clock_on(struct cam_hw_soc_info  *soc_info,
+	uint32_t cpas_ife_base_offset);
+
+/**
+ * cam_ife_csid_disable_ife_force_clock_on()
+ *
+ * @brief:                 disable the IFE force clock on after the dual ISP
+ *                         CSID test generator is stopped
+ *
+ * @soc_info:              soc info structure pointer
+ * @cpas_ife_base_offset:  cpas ife force clock base reg offset value
+ *
+ */
+int cam_ife_csid_disable_ife_force_clock_on(struct cam_hw_soc_info *soc_info,
+	uint32_t cpas_ife_base_offset);
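+
+/*
+ * Illustrative call sequence for the dual-ISP TPG case (a sketch, not
+ * driver code; "base" stands for the CPAS IFE force-clock base register
+ * offset):
+ *
+ *	cam_ife_csid_enable_ife_force_clock_on(soc_info, base);
+ *	... start the CSID test generator and stream ...
+ *	... stop the CSID test generator ...
+ *	cam_ife_csid_disable_ife_force_clock_on(soc_info, base);
+ */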
+
+/**
+ * cam_ife_csid_get_vote_level()
+ *
+ * @brief:                 get the vote level from clock rate
+ *
+ * @soc_info:              soc info structure pointer
+ * @clock_rate:            clock rate
+ *
+ */
+uint32_t cam_ife_csid_get_vote_level(struct cam_hw_soc_info *soc_info,
+	uint64_t clock_rate);
+
+#endif /* _CAM_IFE_CSID_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
new file mode 100644
index 0000000..852d2a3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_ife_csid_hw_intf.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSID_HW_INTF_H_
+#define _CAM_CSID_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+#include "cam_hw_intf.h"
+
+/* MAX IFE CSID instance */
+#define CAM_IFE_CSID_HW_NUM_MAX                        4
+
+/**
+ * enum cam_ife_pix_path_res_id - Specify the csid path
+ */
+enum cam_ife_pix_path_res_id {
+	CAM_IFE_PIX_PATH_RES_RDI_0,
+	CAM_IFE_PIX_PATH_RES_RDI_1,
+	CAM_IFE_PIX_PATH_RES_RDI_2,
+	CAM_IFE_PIX_PATH_RES_RDI_3,
+	CAM_IFE_PIX_PATH_RES_IPP,
+	CAM_IFE_PIX_PATH_RES_PPP,
+	CAM_IFE_PIX_PATH_RES_MAX,
+};
+
+/**
+ * enum cam_ife_cid_res_id - Specify the csid cid
+ */
+enum cam_ife_cid_res_id {
+	CAM_IFE_CSID_CID_0,
+	CAM_IFE_CSID_CID_1,
+	CAM_IFE_CSID_CID_2,
+	CAM_IFE_CSID_CID_3,
+	CAM_IFE_CSID_CID_MAX,
+};
+
+/**
+ * struct cam_ife_csid_hw_caps - CSID hw capabilities
+ * @num_rdis:       number of RDI paths supported by the CSID HW device
+ * @num_pix:        number of pixel paths supported by the CSID HW device
+ * @num_ppp:        number of PPP paths supported by the CSID HW device
+ * @major_version:  major version
+ * @minor_version:  minor version
+ * @version_incr:   version increment
+ *
+ */
+struct cam_ife_csid_hw_caps {
+	uint32_t      num_rdis;
+	uint32_t      num_pix;
+	uint32_t      num_ppp;
+	uint32_t      major_version;
+	uint32_t      minor_version;
+	uint32_t      version_incr;
+};
+
+/**
+ * struct cam_csid_hw_reserve_resource_args - hw reserve arguments
+ * @res_type :    Resource type, CID or PATH
+ *                if type is CID, res_id is not required;
+ *                if type is PATH, res_id must be filled
+ * @res_id  :     Resource id to be reserved
+ * @in_port :     Input port resource info
+ * @out_port:     Output port resource info, used for RDI path only
+ * @sync_mode:    Sync mode: master, slave or none
+ * @master_idx:   Master device index to be configured in the slave path;
+ *                not required for the master path, only the slave needs
+ *                to configure the master index value
+ * @cid:          CID (DT_ID) value for the path, applicable for CSID path
+ *                reserve
+ * @node_res :    Reserved resource structure pointer
+ *
+ */
+struct cam_csid_hw_reserve_resource_args {
+	enum cam_isp_resource_type                res_type;
+	uint32_t                                  res_id;
+	struct cam_isp_in_port_info              *in_port;
+	struct cam_isp_out_port_info             *out_port;
+	enum cam_isp_hw_sync_mode                 sync_mode;
+	uint32_t                                  master_idx;
+	uint32_t                                  cid;
+	struct cam_isp_resource_node             *node_res;
+};
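+
+/*
+ * Illustrative sketch only: reserving an RDI path with the struct above.
+ * 'in_port' and 'out_port' are placeholder assumptions; node_res is
+ * filled in by the driver on a successful reserve.
+ *
+ *   struct cam_csid_hw_reserve_resource_args csid_acq = {
+ *       .res_type  = CAM_ISP_RESOURCE_PIX_PATH,
+ *       .res_id    = CAM_IFE_PIX_PATH_RES_RDI_0,
+ *       .in_port   = in_port,
+ *       .out_port  = out_port,
+ *       .sync_mode = CAM_ISP_HW_SYNC_NONE,
+ *   };
+ */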
+
+/**
+ *  enum cam_ife_csid_halt_cmd - Specify the halt command type
+ */
+enum cam_ife_csid_halt_cmd {
+	CAM_CSID_HALT_AT_FRAME_BOUNDARY,
+	CAM_CSID_RESUME_AT_FRAME_BOUNDARY,
+	CAM_CSID_HALT_IMMEDIATELY,
+	CAM_CSID_HALT_MAX,
+};
+
+/**
+ * struct cam_csid_hw_stop_args - stop resources
+ * @stop_cmd :  Applicable only for PATH resources:
+ *              if set to halt immediately, the driver stops the path
+ *              immediately and the manager must reset the path afterwards;
+ *              if set to halt at frame boundary, the driver programs the
+ *              halt at frame boundary and waits for the frame boundary
+ * @node_res :  resource pointer array (i.e. CID or CSID path)
+ * @num_res :   number of resources to be stopped
+ *
+ */
+struct cam_csid_hw_stop_args {
+	enum cam_ife_csid_halt_cmd                stop_cmd;
+	struct cam_isp_resource_node            **node_res;
+	uint32_t                                  num_res;
+};
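+
+/*
+ * Illustrative sketch only: stopping reserved resources at a frame
+ * boundary. 'res_list' and 'num' are placeholder assumptions.
+ *
+ *   struct cam_csid_hw_stop_args stop_args = {
+ *       .stop_cmd = CAM_CSID_HALT_AT_FRAME_BOUNDARY,
+ *       .node_res = res_list,
+ *       .num_res  = num,
+ *   };
+ */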
+
+/**
+ * enum cam_ife_csid_reset_type - Specify the reset type
+ */
+enum cam_ife_csid_reset_type {
+	CAM_IFE_CSID_RESET_GLOBAL,
+	CAM_IFE_CSID_RESET_PATH,
+	CAM_IFE_CSID_RESET_MAX,
+};
+
+/**
+ * struct cam_csid_reset_cfg_args - csid reset configuration
+ * @reset_type : Global reset or path reset
+ * @node_res :   resource that needs to be reset
+ *
+ */
+struct cam_csid_reset_cfg_args {
+	enum cam_ife_csid_reset_type   reset_type;
+	struct cam_isp_resource_node  *node_res;
+};
+
+/**
+ * struct cam_csid_get_time_stamp_args - time stamp capture arguments
+ * @node_res :       resource to get the time stamp for
+ * @time_stamp_val : captured time stamp
+ * @boot_timestamp : boot time stamp
+ */
+struct cam_csid_get_time_stamp_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           time_stamp_val;
+	uint64_t                           boot_timestamp;
+};
+
+/**
+ * enum cam_ife_csid_cmd_type - Specify the csid command
+ */
+enum cam_ife_csid_cmd_type {
+	CAM_IFE_CSID_CMD_GET_TIME_STAMP,
+	CAM_IFE_CSID_SET_CSID_DEBUG,
+	CAM_IFE_CSID_SOF_IRQ_DEBUG,
+	CAM_IFE_CSID_CMD_MAX,
+};
+
+/**
+ * cam_ife_csid_hw_init()
+ *
+ * @brief:               Initialize function for the CSID hardware
+ *
+ * @ife_csid_hw:         CSID hardware instance returned
+ * @hw_idx:              CSID hardware instance id
+ */
+int cam_ife_csid_hw_init(struct cam_hw_intf **ife_csid_hw,
+	uint32_t hw_idx);
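+
+/*
+ * Illustrative sketch only: how a HW manager might discover CSID
+ * instances with the init call above; 'csid_hw_intf' is a placeholder.
+ *
+ *   struct cam_hw_intf *csid_hw_intf[CAM_IFE_CSID_HW_NUM_MAX];
+ *   for (i = 0; i < CAM_IFE_CSID_HW_NUM_MAX; i++)
+ *       if (cam_ife_csid_hw_init(&csid_hw_intf[i], i))
+ *           csid_hw_intf[i] = NULL;
+ */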
+
+/*
+ * struct cam_ife_csid_clock_update_args:
+ *
+ * @clk_rate:                Clock rate requested
+ */
+struct cam_ife_csid_clock_update_args {
+	uint64_t                           clk_rate;
+};
+
+#endif /* _CAM_CSID_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
new file mode 100644
index 0000000..5f95de5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -0,0 +1,220 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ISP_HW_H_
+#define _CAM_ISP_HW_H_
+
+#include <linux/completion.h>
+#include "cam_hw.h"
+#include <uapi/media/cam_isp.h>
+#include "cam_soc_util.h"
+#include "cam_irq_controller.h"
+
+/*
+ * struct cam_isp_timestamp:
+ *
+ * @mono_time:          Monotonic boot time
+ * @vt_time:            AV Timer time
+ * @ticks:              Qtimer ticks
+ */
+struct cam_isp_timestamp {
+	struct timeval          mono_time;
+	struct timeval          vt_time;
+	uint64_t                ticks;
+};
+
+/*
+ * cam_isp_hw_get_timestamp()
+ *
+ * @Brief:              Get timestamp values
+ *
+ * @time_stamp:         Structure that holds different time values
+ *
+ * @Return:             Void
+ */
+void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp);
+
+enum cam_isp_hw_type {
+	CAM_ISP_HW_TYPE_CSID        = 0,
+	CAM_ISP_HW_TYPE_ISPIF       = 1,
+	CAM_ISP_HW_TYPE_VFE         = 2,
+	CAM_ISP_HW_TYPE_IFE_CSID    = 3,
+	CAM_ISP_HW_TYPE_MAX         = 4,
+};
+
+enum cam_isp_hw_split_id {
+	CAM_ISP_HW_SPLIT_LEFT       = 0,
+	CAM_ISP_HW_SPLIT_RIGHT,
+	CAM_ISP_HW_SPLIT_MAX,
+};
+
+enum cam_isp_hw_sync_mode {
+	CAM_ISP_HW_SYNC_NONE,
+	CAM_ISP_HW_SYNC_MASTER,
+	CAM_ISP_HW_SYNC_SLAVE,
+	CAM_ISP_HW_SYNC_MAX,
+};
+
+enum cam_isp_resource_state {
+	CAM_ISP_RESOURCE_STATE_UNAVAILABLE   = 0,
+	CAM_ISP_RESOURCE_STATE_AVAILABLE     = 1,
+	CAM_ISP_RESOURCE_STATE_RESERVED      = 2,
+	CAM_ISP_RESOURCE_STATE_INIT_HW       = 3,
+	CAM_ISP_RESOURCE_STATE_STREAMING     = 4,
+};
+
+enum cam_isp_resource_type {
+	CAM_ISP_RESOURCE_UNINT,
+	CAM_ISP_RESOURCE_SRC,
+	CAM_ISP_RESOURCE_CID,
+	CAM_ISP_RESOURCE_PIX_PATH,
+	CAM_ISP_RESOURCE_VFE_IN,
+	CAM_ISP_RESOURCE_VFE_OUT,
+	CAM_ISP_RESOURCE_MAX,
+};
+
+enum cam_isp_hw_cmd_type {
+	CAM_ISP_HW_CMD_GET_CHANGE_BASE,
+	CAM_ISP_HW_CMD_GET_BUF_UPDATE,
+	CAM_ISP_HW_CMD_GET_REG_UPDATE,
+	CAM_ISP_HW_CMD_GET_HFR_UPDATE,
+	CAM_ISP_HW_CMD_GET_SECURE_MODE,
+	CAM_ISP_HW_CMD_STRIPE_UPDATE,
+	CAM_ISP_HW_CMD_CLOCK_UPDATE,
+	CAM_ISP_HW_CMD_BW_UPDATE,
+	CAM_ISP_HW_CMD_BW_CONTROL,
+	CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ,
+	CAM_ISP_HW_CMD_GET_REG_DUMP,
+	CAM_ISP_HW_CMD_UBWC_UPDATE,
+	CAM_ISP_HW_CMD_SOF_IRQ_DEBUG,
+	CAM_ISP_HW_CMD_SET_CAMIF_DEBUG,
+	CAM_ISP_HW_CMD_CSID_CLOCK_UPDATE,
+	CAM_ISP_HW_CMD_MAX,
+};
+
+/*
+ * struct cam_isp_resource_node:
+ *
+ * @Brief:                        Structure representing HW resource object
+ *
+ * @res_type:                     Resource Type
+ * @res_id:                       Unique resource ID within res_type objects
+ *                                for a particular HW
+ * @res_state:                    State of the resource
+ * @hw_intf:                      HW Interface of HW to which this resource
+ *                                belongs
+ * @res_priv:                     Private data of the resource
+ * @list:                         list_head node for this resource
+ * @cdm_ops:                      CDM operation functions
+ * @tasklet_info:                 Tasklet structure that will be used to
+ *                                schedule IRQ events related to this resource
+ * @irq_handle:                   handle returned on subscribing for IRQ event
+ * @rdi_only_ctx:                 whether the resource belongs to an RDI-only
+ *                                context
+ * @init:                         function pointer to init the HW resource
+ * @deinit:                       function pointer to deinit the HW resource
+ * @start:                        function pointer to start the HW resource
+ * @stop:                         function pointer to stop the HW resource
+ * @process_cmd:                  function pointer for processing commands
+ *                                specific to the resource
+ * @top_half_handler:             Top Half handler function
+ * @bottom_half_handler:          Bottom Half handler function
+ */
+struct cam_isp_resource_node {
+	enum cam_isp_resource_type     res_type;
+	uint32_t                       res_id;
+	enum cam_isp_resource_state    res_state;
+	struct cam_hw_intf            *hw_intf;
+	void                          *res_priv;
+	struct list_head               list;
+	void                          *cdm_ops;
+	void                          *tasklet_info;
+	int                            irq_handle;
+	int                            rdi_only_ctx;
+
+	int (*init)(struct cam_isp_resource_node *rsrc_node,
+		void *init_args, uint32_t arg_size);
+	int (*deinit)(struct cam_isp_resource_node *rsrc_node,
+		void *deinit_args, uint32_t arg_size);
+	int (*start)(struct cam_isp_resource_node *rsrc_node);
+	int (*stop)(struct cam_isp_resource_node *rsrc_node);
+	int (*process_cmd)(struct cam_isp_resource_node *rsrc_node,
+		uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+	CAM_IRQ_HANDLER_TOP_HALF       top_half_handler;
+	CAM_IRQ_HANDLER_BOTTOM_HALF    bottom_half_handler;
+};
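+
+/*
+ * Illustrative sketch only: callers are expected to drive a resource
+ * through its function pointers rather than touching HW directly;
+ * 'res' is a placeholder for an acquired resource.
+ *
+ *   rc = res->init(res, NULL, 0);
+ *   if (!rc)
+ *       rc = res->start(res);
+ *   ...
+ *   res->stop(res);
+ *   res->deinit(res, NULL, 0);
+ */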
+
+/*
+ * struct cam_isp_hw_cmd_buf_update:
+ *
+ * @Brief:           Contain the new created command buffer information
+ *
+ * @cmd_buf_addr:    Command buffer to store the change base command
+ * @size:            Size of the buffer in bytes
+ * @used_bytes:      Consumed bytes in the command buffer
+ *
+ */
+struct cam_isp_hw_cmd_buf_update {
+	uint32_t                       *cmd_buf_addr;
+	uint32_t                        size;
+	uint32_t                        used_bytes;
+};
+
+/*
+ * struct cam_isp_hw_get_wm_update:
+ *
+ * @Brief:         Get cmd buffer for WM updates.
+ *
+ * @image_buf:     image buffer address array
+ * @num_buf:       Number of buffers in the image_buf array
+ * @io_cfg:        IO buffer config information sent from UMD
+ *
+ */
+struct cam_isp_hw_get_wm_update {
+	uint64_t                       *image_buf;
+	uint32_t                        num_buf;
+	struct cam_buf_io_cfg          *io_cfg;
+};
+
+/*
+ * struct cam_isp_hw_get_cmd_update:
+ *
+ * @Brief:         Get cmd buffer update for different CMD types
+ *
+ * @res:           Resource node
+ * @cmd_type:      Command type for which to get update
+ * @cmd:           Command buffer information
+ *
+ */
+struct cam_isp_hw_get_cmd_update {
+	struct cam_isp_resource_node     *res;
+	enum cam_isp_hw_cmd_type          cmd_type;
+	struct cam_isp_hw_cmd_buf_update  cmd;
+	union {
+		void                                 *data;
+		struct cam_isp_hw_get_wm_update      *wm_update;
+		struct cam_isp_port_hfr_config       *hfr_update;
+		struct cam_isp_clock_config          *clock_update;
+		struct cam_isp_bw_config             *bw_update;
+		struct cam_ubwc_plane_cfg_v1         *ubwc_update;
+	};
+};
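+
+/*
+ * Illustrative sketch only: requesting a register-update command into a
+ * caller-owned buffer; 'res', 'buf' and 'len' are placeholder
+ * assumptions. The union member consulted must match cmd_type.
+ *
+ *   struct cam_isp_hw_get_cmd_update get_cmd = {
+ *       .res      = res,
+ *       .cmd_type = CAM_ISP_HW_CMD_GET_REG_UPDATE,
+ *       .cmd      = { .cmd_buf_addr = buf, .size = len },
+ *   };
+ */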
+
+/*
+ * struct cam_isp_hw_dual_isp_update_args:
+ *
+ * @Brief:        update the dual isp striping configuration.
+ *
+ * @split_id:     split id indicating left or right
+ * @res:          resource node
+ * @dual_cfg:     dual isp configuration
+ *
+ */
+struct cam_isp_hw_dual_isp_update_args {
+	enum cam_isp_hw_split_id         split_id;
+	struct cam_isp_resource_node    *res;
+	struct cam_isp_dual_config      *dual_cfg;
+};
+#endif /* _CAM_ISP_HW_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
new file mode 100644
index 0000000..c182042
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -0,0 +1,295 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_HW_INTF_H_
+#define _CAM_VFE_HW_INTF_H_
+
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_HW_NUM_MAX                       4
+
+#define VFE_CORE_BASE_IDX             0
+/*
+ * VBIF and BUS register blocks do not exist on the same HW,
+ * so both can share base index 1 below.
+ */
+#define VFE_VBIF_BASE_IDX             1
+#define VFE_BUS_BASE_IDX              1
+
+enum cam_isp_hw_vfe_in_mux {
+	CAM_ISP_HW_VFE_IN_CAMIF       = 0,
+	CAM_ISP_HW_VFE_IN_TESTGEN     = 1,
+	CAM_ISP_HW_VFE_IN_BUS_RD      = 2,
+	CAM_ISP_HW_VFE_IN_RDI0        = 3,
+	CAM_ISP_HW_VFE_IN_RDI1        = 4,
+	CAM_ISP_HW_VFE_IN_RDI2        = 5,
+	CAM_ISP_HW_VFE_IN_RDI3        = 6,
+	CAM_ISP_HW_VFE_IN_CAMIF_LITE  = 7,
+	CAM_ISP_HW_VFE_IN_MAX,
+};
+
+enum cam_isp_hw_vfe_core {
+	CAM_ISP_HW_VFE_CORE_0,
+	CAM_ISP_HW_VFE_CORE_1,
+	CAM_ISP_HW_VFE_CORE_2,
+	CAM_ISP_HW_VFE_CORE_3,
+	CAM_ISP_HW_VFE_CORE_MAX,
+};
+
+enum cam_vfe_hw_irq_status {
+	CAM_VFE_IRQ_STATUS_ERR_COMP             = -3,
+	CAM_VFE_IRQ_STATUS_COMP_OWRT            = -2,
+	CAM_VFE_IRQ_STATUS_ERR                  = -1,
+	CAM_VFE_IRQ_STATUS_SUCCESS              = 0,
+	CAM_VFE_IRQ_STATUS_OVERFLOW             = 1,
+	CAM_VFE_IRQ_STATUS_P2I_ERROR            = 2,
+	CAM_VFE_IRQ_STATUS_VIOLATION            = 3,
+	CAM_VFE_IRQ_STATUS_MAX,
+};
+
+enum cam_vfe_hw_irq_regs {
+	CAM_IFE_IRQ_CAMIF_REG_STATUS0           = 0,
+	CAM_IFE_IRQ_CAMIF_REG_STATUS1           = 1,
+	CAM_IFE_IRQ_VIOLATION_STATUS            = 2,
+	CAM_IFE_IRQ_REGISTERS_MAX,
+};
+
+enum cam_vfe_bus_irq_regs {
+	CAM_IFE_IRQ_BUS_REG_STATUS0             = 0,
+	CAM_IFE_IRQ_BUS_REG_STATUS1             = 1,
+	CAM_IFE_IRQ_BUS_REG_STATUS2             = 2,
+	CAM_IFE_IRQ_BUS_REG_COMP_ERR            = 3,
+	CAM_IFE_IRQ_BUS_REG_COMP_OWRT           = 4,
+	CAM_IFE_IRQ_BUS_DUAL_COMP_ERR           = 5,
+	CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT          = 6,
+	CAM_IFE_BUS_IRQ_REGISTERS_MAX,
+};
+
+enum cam_vfe_reset_type {
+	CAM_VFE_HW_RESET_HW_AND_REG,
+	CAM_VFE_HW_RESET_HW,
+	CAM_VFE_HW_RESET_MAX,
+};
+
+/*
+ * struct cam_vfe_hw_get_hw_cap:
+ *
+ * @max_width:               Max width supported by HW
+ * @max_height:              Max height supported by HW
+ * @max_pixel_num:           Max Pixel channels available
+ * @max_rdi_num:             Max Raw channels available
+ */
+struct cam_vfe_hw_get_hw_cap {
+	uint32_t                max_width;
+	uint32_t                max_height;
+	uint32_t                max_pixel_num;
+	uint32_t                max_rdi_num;
+};
+
+/*
+ * struct cam_vfe_hw_vfe_out_acquire_args:
+ *
+ * @rsrc_node:               Pointer to Resource Node object, filled if acquire
+ *                           is successful
+ * @out_port_info:           Output Port details to acquire
+ * @unique_id:               Unique Identity of Context to associate with this
+ *                           resource. Used for composite grouping of multiple
+ *                           resources in the same context
+ * @is_dual:                 Dual VFE or not
+ * @split_id:                In case of Dual VFE, this is Left or Right.
+ *                           (Default is Left if Single VFE)
+ * @is_master:               In case of Dual VFE, this is Master or Slave.
+ *                           (Default is Master in case of Single VFE)
+ * @dual_slave_core:         If Master and Slave exists, HW Index of Slave
+ * @cdm_ops:                 CDM operations
+ * @ctx:                     Context data
+ */
+struct cam_vfe_hw_vfe_out_acquire_args {
+	struct cam_isp_resource_node      *rsrc_node;
+	struct cam_isp_out_port_info      *out_port_info;
+	uint32_t                           unique_id;
+	uint32_t                           is_dual;
+	enum cam_isp_hw_split_id           split_id;
+	uint32_t                           is_master;
+	uint32_t                           dual_slave_core;
+	struct cam_cdm_utils_ops          *cdm_ops;
+	void                              *ctx;
+};
+
+/*
+ * struct cam_vfe_hw_vfe_in_acquire_args:
+ *
+ * @rsrc_node:               Pointer to Resource Node object, filled if acquire
+ *                           is successful
+ * @res_id:                  Resource ID of resource to acquire if specific,
+ *                           else CAM_ISP_HW_VFE_IN_MAX
+ * @cdm_ops:                 CDM operations
+ * @sync_mode:               In case of Dual VFE, this is Master or Slave.
+ *                           (Default is Master in case of Single VFE)
+ * @in_port:                 Input port details to acquire
+ */
+struct cam_vfe_hw_vfe_in_acquire_args {
+	struct cam_isp_resource_node         *rsrc_node;
+	uint32_t                              res_id;
+	void                                 *cdm_ops;
+	enum cam_isp_hw_sync_mode             sync_mode;
+	struct cam_isp_in_port_info          *in_port;
+};
+
+/*
+ * struct cam_vfe_acquire_args:
+ *
+ * @rsrc_type:               Type of Resource (OUT/IN) to acquire
+ * @tasklet:                 Tasklet to associate with this resource. This is
+ *                           used to schedule bottom of IRQ events associated
+ *                           with this resource.
+ * @vfe_out:                 Acquire args for VFE_OUT
+ * @vfe_in:                  Acquire args for VFE_IN
+ */
+struct cam_vfe_acquire_args {
+	enum cam_isp_resource_type           rsrc_type;
+	void                                *tasklet;
+	union {
+		struct cam_vfe_hw_vfe_out_acquire_args  vfe_out;
+		struct cam_vfe_hw_vfe_in_acquire_args   vfe_in;
+	};
+};
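+
+/*
+ * Illustrative sketch only: acquiring a VFE input resource through the
+ * union above; 'tasklet' and 'in_port' are placeholder assumptions.
+ *
+ *   struct cam_vfe_acquire_args acq = {
+ *       .rsrc_type = CAM_ISP_RESOURCE_VFE_IN,
+ *       .tasklet   = tasklet,
+ *   };
+ *   acq.vfe_in.res_id    = CAM_ISP_HW_VFE_IN_CAMIF;
+ *   acq.vfe_in.sync_mode = CAM_ISP_HW_SYNC_NONE;
+ *   acq.vfe_in.in_port   = in_port;
+ */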
+
+/*
+ * struct cam_vfe_clock_update_args:
+ *
+ * @node_res:                Resource whose clock rate is to be updated
+ * @clk_rate:                Clock rate requested
+ */
+struct cam_vfe_clock_update_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           clk_rate;
+};
+
+/*
+ * struct cam_vfe_bw_update_args:
+ *
+ * @node_res:             Resource whose bandwidth vote is to be updated
+ * @camnoc_bw_bytes:      Bandwidth vote request for CAMNOC
+ * @external_bw_bytes:    Bandwidth vote request from CAMNOC
+ *                        out to the rest of the path-to-DDR
+ */
+struct cam_vfe_bw_update_args {
+	struct cam_isp_resource_node      *node_res;
+	uint64_t                           camnoc_bw_bytes;
+	uint64_t                           external_bw_bytes;
+};
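+
+/*
+ * Illustrative sketch only: a bandwidth vote issued through the generic
+ * process_cmd path (CAM_ISP_HW_CMD_BW_UPDATE in cam_isp_hw.h); the byte
+ * values and 'hw_intf'/'node_res' are placeholder assumptions.
+ *
+ *   struct cam_vfe_bw_update_args bw_args = {
+ *       .node_res          = node_res,
+ *       .camnoc_bw_bytes   = 600000000,
+ *       .external_bw_bytes = 600000000,
+ *   };
+ *   hw_intf->hw_ops.process_cmd(hw_intf->hw_priv,
+ *       CAM_ISP_HW_CMD_BW_UPDATE, &bw_args, sizeof(bw_args));
+ */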
+
+enum cam_vfe_bw_control_action {
+	CAM_VFE_BW_CONTROL_EXCLUDE       = 0,
+	CAM_VFE_BW_CONTROL_INCLUDE       = 1
+};
+
+/*
+ * struct cam_vfe_bw_control_args:
+ *
+ * @node_res:             Resource to apply the bandwidth control action on
+ * @action:               Bandwidth control action
+ */
+struct cam_vfe_bw_control_args {
+	struct cam_isp_resource_node      *node_res;
+	enum cam_vfe_bw_control_action     action;
+};
+
+/*
+ * struct cam_vfe_top_irq_evt_payload:
+ *
+ * @Brief:                   This structure is used to save payload for IRQ
+ *                           related to VFE_TOP resources
+ *
+ * @list:                    list_head node for the payload
+ * @core_index:              Index of VFE HW that generated this IRQ event
+ * @core_info:               Private data of handler in bottom half context
+ * @evt_id:                  IRQ event
+ * @irq_reg_val:             IRQ and Error register values, read when IRQ was
+ *                           handled
+ * @error_type:              Identify different errors
+ * @ts:                      Timestamp
+ */
+struct cam_vfe_top_irq_evt_payload {
+	struct list_head           list;
+	uint32_t                   core_index;
+	void                      *core_info;
+	uint32_t                   evt_id;
+	uint32_t                   irq_reg_val[CAM_IFE_IRQ_REGISTERS_MAX];
+	uint32_t                   error_type;
+	struct cam_isp_timestamp   ts;
+};
+
+/*
+ * struct cam_vfe_bus_irq_evt_payload:
+ *
+ * @Brief:                   This structure is used to save payload for IRQ
+ *                           related to VFE_BUS resources
+ *
+ * @list:                    list_head node for the payload
+ * @core_index:              Index of VFE HW that generated this IRQ event
+ * @debug_status_0:          Value of debug status_0 register at time of IRQ
+ * @evt_id:                  IRQ event
+ * @irq_reg_val:             IRQ and Error register values, read when IRQ was
+ *                           handled
+ * @error_type:              Identify different errors
+ * @ts:                      Timestamp
+ * @ctx:                     Context data received during acquire
+ */
+struct cam_vfe_bus_irq_evt_payload {
+	struct list_head            list;
+	uint32_t                    core_index;
+	uint32_t                    debug_status_0;
+	uint32_t                    evt_id;
+	uint32_t                    irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
+	uint32_t                    error_type;
+	struct cam_isp_timestamp    ts;
+	void                       *ctx;
+};
+
+/*
+ * struct cam_vfe_irq_handler_priv:
+ *
+ * @Brief:                   This structure is used as private data to
+ *                           register with IRQ controller. It has information
+ *                           needed by top half and bottom half.
+ *
+ * @core_index:              Index of VFE HW that generated this IRQ event
+ * @core_info:               Private data of handler in bottom half context
+ * @mem_base:                Mapped base address of the register space
+ * @reset_complete:          Completion structure to be signaled if Reset IRQ
+ *                           is Set
+ */
+struct cam_vfe_irq_handler_priv {
+	uint32_t                     core_index;
+	void                        *core_info;
+	void __iomem                *mem_base;
+	struct completion           *reset_complete;
+};
+
+/*
+ * cam_vfe_hw_init()
+ *
+ * @Brief:                  Initialize VFE HW device
+ *
+ * @vfe_hw:                 vfe_hw interface to fill in and return on
+ *                          successful initialization
+ * @hw_idx:                 Index of VFE HW
+ */
+int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx);
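+
+/*
+ * Illustrative sketch only: fetching a probed VFE instance;
+ * 'vfe_hw_intf' and 'hw_idx' are placeholder assumptions.
+ *
+ *   struct cam_hw_intf *vfe_hw_intf = NULL;
+ *
+ *   rc = cam_vfe_hw_init(&vfe_hw_intf, hw_idx);
+ *   if (rc)
+ *       return rc;
+ */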
+
+/*
+ * cam_vfe_put_evt_payload()
+ *
+ * @Brief:                  Put the evt payload back to free list
+ *
+ * @core_info:              VFE HW core_info
+ * @evt_payload:            Event payload data
+ */
+int cam_vfe_put_evt_payload(void             *core_info,
+	struct cam_vfe_top_irq_evt_payload  **evt_payload);
+
+#endif /* _CAM_VFE_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile
new file mode 100644
index 0000000..c40a735
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_soc.o cam_vfe_dev.o cam_vfe_core.o
+obj-$(CONFIG_SPECTRA_CAMERA) += vfe_bus/ vfe_top/ vfe17x/
\ No newline at end of file
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
new file mode 100644
index 0000000..aab911e1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -0,0 +1,815 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/timer.h>
+#include <linux/ratelimit.h>
+#include "cam_tasklet_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_top.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_debug_util.h"
+
+static const char drv_name[] = "vfe";
+static uint32_t irq_reg_offset[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x0000006C,
+	0x00000070,
+	0x0000007C,
+};
+
+static uint32_t camif_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x00000017,
+	0x00000000,
+};
+
+static uint32_t camif_irq_err_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x0003FC00,
+	0xEFFF7EBC,
+};
+
+static uint32_t rdi_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x780001e0,
+	0x00000000,
+};
+
+static uint32_t top_reset_irq_reg_mask[CAM_IFE_IRQ_REGISTERS_MAX] = {
+	0x80000000,
+	0x00000000,
+};
+
+static int cam_vfe_get_evt_payload(struct cam_vfe_hw_core_info *core_info,
+	struct cam_vfe_top_irq_evt_payload    **evt_payload)
+{
+	spin_lock(&core_info->spin_lock);
+	if (list_empty(&core_info->free_payload_list)) {
+		*evt_payload = NULL;
+		spin_unlock(&core_info->spin_lock);
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload, core info 0x%x\n",
+			core_info->cpas_handle);
+		return -ENODEV;
+	}
+
+	*evt_payload = list_first_entry(&core_info->free_payload_list,
+		struct cam_vfe_top_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	spin_unlock(&core_info->spin_lock);
+
+	return 0;
+}
+
+int cam_vfe_put_evt_payload(void             *core_info,
+	struct cam_vfe_top_irq_evt_payload  **evt_payload)
+{
+	struct cam_vfe_hw_core_info        *vfe_core_info = core_info;
+	unsigned long                       flags;
+
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&vfe_core_info->spin_lock, flags);
+	(*evt_payload)->error_type = 0;
+	list_add_tail(&(*evt_payload)->list, &vfe_core_info->free_payload_list);
+	*evt_payload = NULL;
+	spin_unlock_irqrestore(&vfe_core_info->spin_lock, flags);
+
+	return 0;
+}
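+
+/*
+ * Note (illustrative): every payload taken with cam_vfe_get_evt_payload()
+ * in a top half must eventually be returned here from the bottom half,
+ * e.g.:
+ *
+ *   cam_vfe_put_evt_payload(evt_payload->core_info, &evt_payload);
+ */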
+
+int cam_vfe_get_hw_caps(void *hw_priv, void *get_hw_cap_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_dev = hw_priv;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	int rc = 0;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_dev->core_info;
+
+	if (core_info->vfe_top->hw_ops.get_hw_caps)
+		core_info->vfe_top->hw_ops.get_hw_caps(
+			core_info->vfe_top->top_priv,
+			get_hw_cap_args, arg_size);
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+int cam_vfe_reset_irq_top_half(uint32_t    evt_id,
+	struct cam_irq_th_payload         *th_payload)
+{
+	int32_t                            rc = -EINVAL;
+	struct cam_vfe_irq_handler_priv   *handler_priv;
+
+	handler_priv = th_payload->handler_priv;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+
+	if (th_payload->evt_status_arr[0] & (1U << 31)) {
+		/*
+		 * Clear All IRQs to avoid spurious IRQs immediately
+		 * after Reset Done.
+		 */
+		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x64);
+		cam_io_w(0xFFFFFFFF, handler_priv->mem_base + 0x68);
+		cam_io_w(0x1, handler_priv->mem_base + 0x58);
+		CAM_DBG(CAM_ISP, "Calling Complete for RESET CMD");
+		complete(handler_priv->reset_complete);
+
+		rc = 0;
+	}
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+static int cam_vfe_irq_err_top_half(uint32_t    evt_id,
+	struct cam_irq_th_payload   *th_payload)
+{
+	int32_t                              rc;
+	int                                  i;
+	struct cam_vfe_irq_handler_priv     *handler_priv;
+	struct cam_vfe_top_irq_evt_payload  *evt_payload;
+	struct cam_vfe_hw_core_info         *core_info;
+	bool                                 error_flag = false;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x, IRQ status_1 = %x",
+		th_payload->evt_status_arr[0], th_payload->evt_status_arr[1]);
+
+	handler_priv = th_payload->handler_priv;
+	core_info =  handler_priv->core_info;
+	/*
+	 *  need to handle overflow condition here, otherwise irq storm
+	 *  will block everything
+	 */
+	if (th_payload->evt_status_arr[1] ||
+		(th_payload->evt_status_arr[0] & camif_irq_err_reg_mask[0])) {
+		CAM_ERR(CAM_ISP,
+			"Encountered Error: vfe:%d:  Irq_status0=0x%x Status1=0x%x",
+			handler_priv->core_index, th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		CAM_ERR(CAM_ISP,
+			"Stopping further IRQ processing from this HW index=%d",
+			handler_priv->core_index);
+		cam_irq_controller_disable_irq(core_info->vfe_irq_controller,
+			core_info->irq_err_handle);
+		cam_irq_controller_clear_and_mask(evt_id,
+			core_info->vfe_irq_controller);
+		error_flag = true;
+	}
+
+	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = handler_priv->core_index;
+	evt_payload->core_info  = handler_priv->core_info;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+			irq_reg_offset[i]);
+	}
+
+	if (error_flag)
+		CAM_INFO(CAM_ISP, "Violation status = %x",
+			evt_payload->irq_reg_val[2]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
+int cam_vfe_init_hw(void *hw_priv, void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_isp_resource_node      *isp_res = NULL;
+	int rc = 0;
+	uint32_t                           reset_core_args =
+					CAM_VFE_HW_RESET_HW_AND_REG;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	vfe_hw->open_count++;
+	if (vfe_hw->open_count > 1) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CAM_DBG(CAM_ISP, "VFE has already been initialized cnt %d",
+			vfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	/* Turn ON Regulators, Clocks and other SOC resources */
+	rc = cam_vfe_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Enable SOC failed");
+		rc = -EFAULT;
+		goto decrement_open_cnt;
+	}
+
+	isp_res   = (struct cam_isp_resource_node *)init_hw_args;
+	if (isp_res && isp_res->init) {
+		rc = isp_res->init(isp_res, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "init Failed rc=%d", rc);
+			goto disable_soc;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Enable soc done");
+
+	/* Do HW Reset */
+	rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Reset Failed rc=%d", rc);
+		goto deinit_vfe_res;
+	}
+
+	rc = core_info->vfe_bus->hw_ops.init(core_info->vfe_bus->bus_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Bus HW init Failed rc=%d", rc);
+		goto deinit_vfe_res;
+	}
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	return rc;
+
+deinit_vfe_res:
+	if (isp_res && isp_res->deinit)
+		isp_res->deinit(isp_res, NULL, 0);
+disable_soc:
+	cam_vfe_disable_soc_resources(soc_info);
+decrement_open_cnt:
+	mutex_lock(&vfe_hw->hw_mutex);
+	vfe_hw->open_count--;
+	mutex_unlock(&vfe_hw->hw_mutex);
+	return rc;
+}
+
+int cam_vfe_deinit_hw(void *hw_priv, void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_isp_resource_node      *isp_res = NULL;
+	int rc = 0;
+	uint32_t                           reset_core_args =
+					CAM_VFE_HW_RESET_HW_AND_REG;
+
+	CAM_DBG(CAM_ISP, "Enter");
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (!vfe_hw->open_count) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CAM_ERR(CAM_ISP, "Error! Unbalanced deinit");
+		return -EFAULT;
+	}
+	vfe_hw->open_count--;
+	if (vfe_hw->open_count) {
+		mutex_unlock(&vfe_hw->hw_mutex);
+		CAM_DBG(CAM_ISP, "open_cnt non-zero =%d", vfe_hw->open_count);
+		return 0;
+	}
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	rc = core_info->vfe_bus->hw_ops.deinit(core_info->vfe_bus->bus_priv,
+		NULL, 0);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Bus HW deinit Failed rc=%d", rc);
+
+	isp_res   = (struct cam_isp_resource_node *)deinit_hw_args;
+	if (isp_res && isp_res->deinit) {
+		rc = isp_res->deinit(isp_res, NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_ISP, "deinit failed");
+	}
+
+	rc = cam_vfe_reset(hw_priv, &reset_core_args, sizeof(uint32_t));
+
+	/* Turn OFF Regulators, Clocks and other SOC resources */
+	CAM_DBG(CAM_ISP, "Disable SOC resource");
+	rc = cam_vfe_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Disable SOC failed");
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+int cam_vfe_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	int rc;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	core_info->irq_payload.core_index = soc_info->index;
+	core_info->irq_payload.mem_base =
+		vfe_hw->soc_info.reg_map[VFE_CORE_BASE_IDX].mem_base;
+	core_info->irq_payload.core_info = core_info;
+	core_info->irq_payload.reset_complete = &vfe_hw->hw_complete;
+
+	core_info->irq_handle = cam_irq_controller_subscribe_irq(
+		core_info->vfe_irq_controller, CAM_IRQ_PRIORITY_0,
+		top_reset_irq_reg_mask, &core_info->irq_payload,
+		cam_vfe_reset_irq_top_half, NULL, NULL, NULL);
+	if (core_info->irq_handle < 0) {
+		CAM_ERR(CAM_ISP, "subscribe irq controller failed");
+		return -EFAULT;
+	}
+
+	reinit_completion(&vfe_hw->hw_complete);
+
+	CAM_DBG(CAM_ISP, "calling RESET on vfe %d", soc_info->index);
+	core_info->vfe_top->hw_ops.reset(core_info->vfe_top->top_priv,
+		reset_core_args, arg_size);
+	CAM_DBG(CAM_ISP, "waiting for vfe reset complete");
+	/* Wait for Completion or Timeout of 500ms */
+	rc = wait_for_completion_timeout(&vfe_hw->hw_complete,
+		msecs_to_jiffies(500));
+	if (!rc)
+		CAM_ERR(CAM_ISP, "Error! Reset Timeout");
+
+	CAM_DBG(CAM_ISP, "reset complete done (%d)", rc);
+
+	rc = cam_irq_controller_unsubscribe_irq(
+		core_info->vfe_irq_controller, core_info->irq_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Error! Unsubscribe failed");
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+void cam_isp_hw_get_timestamp(struct cam_isp_timestamp *time_stamp)
+{
+	struct timespec ts;
+
+	get_monotonic_boottime(&ts);
+	time_stamp->mono_time.tv_sec    = ts.tv_sec;
+	time_stamp->mono_time.tv_usec   = ts.tv_nsec/1000;
+}
+
+static int cam_vfe_irq_top_half(uint32_t    evt_id,
+	struct cam_irq_th_payload   *th_payload)
+{
+	int32_t                              rc;
+	int                                  i;
+	struct cam_vfe_irq_handler_priv     *handler_priv;
+	struct cam_vfe_top_irq_evt_payload  *evt_payload;
+	struct cam_vfe_hw_core_info         *core_info;
+
+	handler_priv = th_payload->handler_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = %x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = %x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_get_evt_payload(handler_priv->core_info, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "IRQ status0=0x%x status1=0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+		return rc;
+	}
+
+	core_info =  handler_priv->core_info;
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->core_index = handler_priv->core_index;
+	evt_payload->core_info  = handler_priv->core_info;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	for (; i < CAM_IFE_IRQ_REGISTERS_MAX; i++) {
+		evt_payload->irq_reg_val[i] = cam_io_r(handler_priv->mem_base +
+			irq_reg_offset[i]);
+	}
+	CAM_DBG(CAM_ISP, "Violation status = %x", evt_payload->irq_reg_val[2]);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+int cam_vfe_reserve(void *hw_priv, void *reserve_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_vfe_acquire_args       *acquire;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !reserve_args || (arg_size !=
+		sizeof(struct cam_vfe_acquire_args))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	acquire = (struct cam_vfe_acquire_args   *)reserve_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_IN)
+		rc = core_info->vfe_top->hw_ops.reserve(
+			core_info->vfe_top->top_priv,
+			acquire,
+			sizeof(*acquire));
+	else if (acquire->rsrc_type == CAM_ISP_RESOURCE_VFE_OUT)
+		rc = core_info->vfe_bus->hw_ops.reserve(
+			core_info->vfe_bus->bus_priv, acquire,
+			sizeof(*acquire));
+	else
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", acquire->rsrc_type);
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_vfe_release(void *hw_priv, void *release_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_isp_resource_node      *isp_res;
+	int rc = -ENODEV;
+
+	if (!hw_priv || !release_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node      *) release_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN)
+		rc = core_info->vfe_top->hw_ops.release(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(*isp_res));
+	else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT)
+		rc = core_info->vfe_bus->hw_ops.release(
+			core_info->vfe_bus->bus_priv, isp_res,
+			sizeof(*isp_res));
+	else
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_vfe_start(void *hw_priv, void *start_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_isp_resource_node      *isp_res;
+	int rc = 0;
+
+	if (!hw_priv || !start_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node  *)start_args;
+	core_info->tasklet_info = isp_res->tasklet_info;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
+		if (isp_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+			isp_res->irq_handle =
+				cam_irq_controller_subscribe_irq(
+					core_info->vfe_irq_controller,
+					CAM_IRQ_PRIORITY_1,
+					camif_irq_reg_mask,
+					&core_info->irq_payload,
+					cam_vfe_irq_top_half,
+					cam_ife_mgr_do_tasklet,
+					isp_res->tasklet_info,
+					&tasklet_bh_api);
+			if (isp_res->irq_handle < 1)
+				rc = -ENOMEM;
+		} else if (isp_res->rdi_only_ctx) {
+			isp_res->irq_handle =
+				cam_irq_controller_subscribe_irq(
+					core_info->vfe_irq_controller,
+					CAM_IRQ_PRIORITY_1,
+					rdi_irq_reg_mask,
+					&core_info->irq_payload,
+					cam_vfe_irq_top_half,
+					cam_ife_mgr_do_tasklet,
+					isp_res->tasklet_info,
+					&tasklet_bh_api);
+			if (isp_res->irq_handle < 1)
+				rc = -ENOMEM;
+		}
+
+		if (rc == 0) {
+			rc = core_info->vfe_top->hw_ops.start(
+				core_info->vfe_top->top_priv, isp_res,
+				sizeof(struct cam_isp_resource_node));
+			if (rc)
+				CAM_ERR(CAM_ISP, "Start failed. type:%d",
+					isp_res->res_type);
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Error! subscribe irq controller failed");
+		}
+	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
+		rc = core_info->vfe_bus->hw_ops.start(isp_res, NULL, 0);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+		rc = -EFAULT;
+	}
+
+	if (!core_info->irq_err_handle) {
+		core_info->irq_err_handle =
+			cam_irq_controller_subscribe_irq(
+				core_info->vfe_irq_controller,
+				CAM_IRQ_PRIORITY_0,
+				camif_irq_err_reg_mask,
+				&core_info->irq_payload,
+				cam_vfe_irq_err_top_half,
+				cam_ife_mgr_do_tasklet,
+				core_info->tasklet_info,
+				&tasklet_bh_api);
+		if (core_info->irq_err_handle < 1) {
+			CAM_ERR(CAM_ISP, "Error handle subscribe failure");
+			rc = -ENOMEM;
+			core_info->irq_err_handle = 0;
+		}
+	}
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_vfe_stop(void *hw_priv, void *stop_args, uint32_t arg_size)
+{
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_hw_info                *vfe_hw  = hw_priv;
+	struct cam_isp_resource_node      *isp_res;
+	int rc = -EINVAL;
+
+	if (!hw_priv || !stop_args ||
+		(arg_size != sizeof(struct cam_isp_resource_node))) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	isp_res = (struct cam_isp_resource_node  *)stop_args;
+
+	mutex_lock(&vfe_hw->hw_mutex);
+	if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_IN) {
+		cam_irq_controller_unsubscribe_irq(
+			core_info->vfe_irq_controller, isp_res->irq_handle);
+		isp_res->irq_handle = 0;
+
+		rc = core_info->vfe_top->hw_ops.stop(
+			core_info->vfe_top->top_priv, isp_res,
+			sizeof(struct cam_isp_resource_node));
+	} else if (isp_res->res_type == CAM_ISP_RESOURCE_VFE_OUT) {
+		rc = core_info->vfe_bus->hw_ops.stop(isp_res, NULL, 0);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res type:%d", isp_res->res_type);
+	}
+
+	if (core_info->irq_err_handle) {
+		cam_irq_controller_unsubscribe_irq(
+			core_info->vfe_irq_controller,
+			core_info->irq_err_handle);
+		core_info->irq_err_handle = 0;
+	}
+
+	mutex_unlock(&vfe_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_vfe_read(void *hw_priv, void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_write(void *hw_priv, void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info                *vfe_hw = hw_priv;
+	struct cam_hw_soc_info            *soc_info = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_vfe_hw_info            *hw_info = NULL;
+	int rc = 0;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	soc_info = &vfe_hw->soc_info;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	hw_info = core_info->vfe_hw_info;
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+	case CAM_ISP_HW_CMD_BW_UPDATE:
+	case CAM_ISP_HW_CMD_BW_CONTROL:
+		rc = core_info->vfe_top->hw_ops.process_cmd(
+			core_info->vfe_top->top_priv, cmd_type, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
+	case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
+	case CAM_ISP_HW_CMD_UBWC_UPDATE:
+		rc = core_info->vfe_bus->hw_ops.process_cmd(
+			core_info->vfe_bus->bus_priv, cmd_type, cmd_args,
+			arg_size);
+		break;
+
+	default:
+		CAM_ERR(CAM_ISP, "Invalid cmd type:%d", cmd_type);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+irqreturn_t cam_vfe_irq(int irq_num, void *data)
+{
+	struct cam_hw_info            *vfe_hw;
+	struct cam_vfe_hw_core_info   *core_info;
+
+	if (!data)
+		return IRQ_NONE;
+
+	vfe_hw = (struct cam_hw_info *)data;
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	return cam_irq_controller_handle_irq(irq_num,
+		core_info->vfe_irq_controller);
+}
+
+int cam_vfe_core_init(struct cam_vfe_hw_core_info  *core_info,
+	struct cam_hw_soc_info                     *soc_info,
+	struct cam_hw_intf                         *hw_intf,
+	struct cam_vfe_hw_info                     *vfe_hw_info)
+{
+	int rc = -EINVAL;
+	int i;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	rc = cam_irq_controller_init(drv_name,
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX),
+		vfe_hw_info->irq_reg_info, &core_info->vfe_irq_controller);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! cam_irq_controller_init failed");
+		return rc;
+	}
+
+	rc = cam_vfe_top_init(vfe_hw_info->top_version,
+		soc_info, hw_intf, vfe_hw_info->top_hw_info,
+		&core_info->vfe_top);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! cam_vfe_top_init failed");
+		goto deinit_controller;
+	}
+
+	rc = cam_vfe_bus_init(vfe_hw_info->bus_version, soc_info, hw_intf,
+		vfe_hw_info->bus_hw_info, core_info->vfe_irq_controller,
+		&core_info->vfe_bus);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! cam_vfe_bus_init failed");
+		goto deinit_top;
+	}
+
+	INIT_LIST_HEAD(&core_info->free_payload_list);
+	for (i = 0; i < CAM_VFE_EVT_MAX; i++) {
+		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+		list_add_tail(&core_info->evt_payload[i].list,
+			&core_info->free_payload_list);
+	}
+
+	spin_lock_init(&core_info->spin_lock);
+
+	return rc;
+
+deinit_top:
+	cam_vfe_top_deinit(vfe_hw_info->top_version,
+		&core_info->vfe_top);
+
+deinit_controller:
+	cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+
+	return rc;
+}
+
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info  *core_info,
+	struct cam_vfe_hw_info                       *vfe_hw_info)
+{
+	int                rc = -EINVAL;
+	int                i;
+	unsigned long      flags;
+
+	spin_lock_irqsave(&core_info->spin_lock, flags);
+
+	INIT_LIST_HEAD(&core_info->free_payload_list);
+	for (i = 0; i < CAM_VFE_EVT_MAX; i++)
+		INIT_LIST_HEAD(&core_info->evt_payload[i].list);
+
+	rc = cam_vfe_bus_deinit(vfe_hw_info->bus_version,
+		&core_info->vfe_bus);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Error cam_vfe_bus_deinit failed rc=%d", rc);
+
+	rc = cam_vfe_top_deinit(vfe_hw_info->top_version,
+		&core_info->vfe_top);
+	if (rc)
+		CAM_ERR(CAM_ISP, "Error cam_vfe_top_deinit failed rc=%d", rc);
+
+	rc = cam_irq_controller_deinit(&core_info->vfe_irq_controller);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Error cam_irq_controller_deinit failed rc=%d", rc);
+
+	spin_unlock_irqrestore(&core_info->spin_lock, flags);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
new file mode 100644
index 0000000..d525fad
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_CORE_H_
+#define _CAM_VFE_CORE_H_
+
+#include <linux/spinlock.h>
+#include "cam_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_hw_intf.h"
+
+struct cam_vfe_hw_info {
+	struct cam_irq_controller_reg_info *irq_reg_info;
+
+	uint32_t                          bus_version;
+	void                             *bus_hw_info;
+
+	uint32_t                          top_version;
+	void                             *top_hw_info;
+	uint32_t                          camif_version;
+	void                             *camif_reg;
+
+	uint32_t                          camif_lite_version;
+	void                             *camif_lite_reg;
+
+	uint32_t                          testgen_version;
+	void                             *testgen_reg;
+
+	uint32_t                          num_qos_settings;
+	struct cam_isp_reg_val_pair      *qos_settings;
+
+	uint32_t                          num_ds_settings;
+	struct cam_isp_reg_val_pair      *ds_settings;
+
+	uint32_t                          num_vbif_settings;
+	struct cam_isp_reg_val_pair      *vbif_settings;
+};
+
+#define CAM_VFE_EVT_MAX                    256
+
+struct cam_vfe_hw_core_info {
+	struct cam_vfe_hw_info             *vfe_hw_info;
+	void                               *vfe_irq_controller;
+	struct cam_vfe_top                 *vfe_top;
+	struct cam_vfe_bus                 *vfe_bus;
+	void                               *tasklet_info;
+	struct cam_vfe_top_irq_evt_payload  evt_payload[CAM_VFE_EVT_MAX];
+	struct list_head                    free_payload_list;
+	struct cam_vfe_irq_handler_priv     irq_payload;
+	uint32_t                            cpas_handle;
+	int                                 irq_handle;
+	int                                 irq_err_handle;
+	spinlock_t                          spin_lock;
+};
+
+int cam_vfe_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size);
+int cam_vfe_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_vfe_deinit_hw(void *hw_priv,
+	void *deinit_hw_args, uint32_t arg_size);
+int cam_vfe_reset(void *device_priv,
+	void *reset_core_args, uint32_t arg_size);
+int cam_vfe_reserve(void *device_priv,
+	void *reserve_args, uint32_t arg_size);
+int cam_vfe_release(void *device_priv,
+	void *reserve_args, uint32_t arg_size);
+int cam_vfe_start(void *device_priv,
+	void *start_args, uint32_t arg_size);
+int cam_vfe_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size);
+int cam_vfe_read(void *device_priv,
+	void *read_args, uint32_t arg_size);
+int cam_vfe_write(void *device_priv,
+	void *write_args, uint32_t arg_size);
+int cam_vfe_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+
+irqreturn_t cam_vfe_irq(int irq_num, void *data);
+
+int cam_vfe_core_init(struct cam_vfe_hw_core_info *core_info,
+	struct cam_hw_soc_info             *soc_info,
+	struct cam_hw_intf                 *hw_intf,
+	struct cam_vfe_hw_info             *vfe_hw_info);
+
+int cam_vfe_core_deinit(struct cam_vfe_hw_core_info *core_info,
+	struct cam_vfe_hw_info             *vfe_hw_info);
+
+#endif /* _CAM_VFE_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
new file mode 100644
index 0000000..022569f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include "cam_vfe_dev.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_soc.h"
+#include "cam_debug_util.h"
+
+static struct cam_hw_intf *cam_vfe_hw_list[CAM_VFE_HW_NUM_MAX] = {0, 0, 0, 0};
+
+static char vfe_dev_name[8];
+
+int cam_vfe_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info                *vfe_hw = NULL;
+	struct cam_hw_intf                *vfe_hw_intf = NULL;
+	const struct of_device_id         *match_dev = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	struct cam_vfe_hw_info            *hw_info = NULL;
+	int                                rc = 0;
+
+	vfe_hw_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!vfe_hw_intf) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &vfe_hw_intf->hw_idx);
+
+	vfe_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!vfe_hw) {
+		rc = -ENOMEM;
+		goto free_vfe_hw_intf;
+	}
+
+	memset(vfe_dev_name, 0, sizeof(vfe_dev_name));
+	snprintf(vfe_dev_name, sizeof(vfe_dev_name),
+		"vfe%1u", vfe_hw_intf->hw_idx);
+
+	vfe_hw->soc_info.pdev = pdev;
+	vfe_hw->soc_info.dev = &pdev->dev;
+	vfe_hw->soc_info.dev_name = vfe_dev_name;
+	vfe_hw_intf->hw_priv = vfe_hw;
+	vfe_hw_intf->hw_ops.get_hw_caps = cam_vfe_get_hw_caps;
+	vfe_hw_intf->hw_ops.init = cam_vfe_init_hw;
+	vfe_hw_intf->hw_ops.deinit = cam_vfe_deinit_hw;
+	vfe_hw_intf->hw_ops.reset = cam_vfe_reset;
+	vfe_hw_intf->hw_ops.reserve = cam_vfe_reserve;
+	vfe_hw_intf->hw_ops.release = cam_vfe_release;
+	vfe_hw_intf->hw_ops.start = cam_vfe_start;
+	vfe_hw_intf->hw_ops.stop = cam_vfe_stop;
+	vfe_hw_intf->hw_ops.read = cam_vfe_read;
+	vfe_hw_intf->hw_ops.write = cam_vfe_write;
+	vfe_hw_intf->hw_ops.process_cmd = cam_vfe_process_cmd;
+	vfe_hw_intf->hw_type = CAM_ISP_HW_TYPE_VFE;
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+
+	platform_set_drvdata(pdev, vfe_hw_intf);
+
+	vfe_hw->core_info = kzalloc(sizeof(struct cam_vfe_hw_core_info),
+		GFP_KERNEL);
+	if (!vfe_hw->core_info) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for core");
+		rc = -ENOMEM;
+		goto free_vfe_hw;
+	}
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_ISP, "Of_match Failed");
+		rc = -EINVAL;
+		goto free_core_info;
+	}
+	hw_info = (struct cam_vfe_hw_info *)match_dev->data;
+	core_info->vfe_hw_info = hw_info;
+
+	rc = cam_vfe_init_soc_resources(&vfe_hw->soc_info, cam_vfe_irq,
+		vfe_hw);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Failed to init soc rc=%d", rc);
+		goto free_core_info;
+	}
+
+	rc = cam_vfe_core_init(core_info, &vfe_hw->soc_info,
+		vfe_hw_intf, hw_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Failed to init core rc=%d", rc);
+		goto deinit_soc;
+	}
+
+	vfe_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&vfe_hw->hw_mutex);
+	spin_lock_init(&vfe_hw->hw_lock);
+	init_completion(&vfe_hw->hw_complete);
+
+	if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
+		cam_vfe_hw_list[vfe_hw_intf->hw_idx] = vfe_hw_intf;
+
+	cam_vfe_init_hw(vfe_hw, NULL, 0);
+	cam_vfe_deinit_hw(vfe_hw, NULL, 0);
+
+	CAM_DBG(CAM_ISP, "VFE%d probe successful", vfe_hw_intf->hw_idx);
+
+	return rc;
+
+deinit_soc:
+	if (cam_vfe_deinit_soc_resources(&vfe_hw->soc_info))
+		CAM_ERR(CAM_ISP, "Failed to deinit soc");
+free_core_info:
+	kfree(vfe_hw->core_info);
+free_vfe_hw:
+	kfree(vfe_hw);
+free_vfe_hw_intf:
+	kfree(vfe_hw_intf);
+end:
+	return rc;
+}
+
+int cam_vfe_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info                *vfe_hw = NULL;
+	struct cam_hw_intf                *vfe_hw_intf = NULL;
+	struct cam_vfe_hw_core_info       *core_info = NULL;
+	int                                rc = 0;
+
+	vfe_hw_intf = platform_get_drvdata(pdev);
+	if (!vfe_hw_intf) {
+		CAM_ERR(CAM_ISP, "Error! No data in pdev");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ISP, "type %d index %d",
+		vfe_hw_intf->hw_type, vfe_hw_intf->hw_idx);
+
+	if (vfe_hw_intf->hw_idx < CAM_VFE_HW_NUM_MAX)
+		cam_vfe_hw_list[vfe_hw_intf->hw_idx] = NULL;
+
+	vfe_hw = vfe_hw_intf->hw_priv;
+	if (!vfe_hw) {
+		CAM_ERR(CAM_ISP, "Error! HW data is NULL");
+		rc = -ENODEV;
+		goto free_vfe_hw_intf;
+	}
+
+	core_info = (struct cam_vfe_hw_core_info *)vfe_hw->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Error! core data NULL");
+		rc = -EINVAL;
+		goto deinit_soc;
+	}
+
+	rc = cam_vfe_core_deinit(core_info, core_info->vfe_hw_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "Failed to deinit core rc=%d", rc);
+
+	kfree(vfe_hw->core_info);
+
+deinit_soc:
+	rc = cam_vfe_deinit_soc_resources(&vfe_hw->soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&vfe_hw->hw_mutex);
+	kfree(vfe_hw);
+
+	CAM_DBG(CAM_ISP, "VFE%d remove successful", vfe_hw_intf->hw_idx);
+
+free_vfe_hw_intf:
+	kfree(vfe_hw_intf);
+
+	return rc;
+}
+
+int cam_vfe_hw_init(struct cam_hw_intf **vfe_hw, uint32_t hw_idx)
+{
+	int rc = 0;
+
+	if (hw_idx < CAM_VFE_HW_NUM_MAX && cam_vfe_hw_list[hw_idx]) {
+		*vfe_hw = cam_vfe_hw_list[hw_idx];
+		rc = 0;
+	} else {
+		*vfe_hw = NULL;
+		rc = -ENODEV;
+	}
+	return rc;
+}
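+
+/*
+ * Illustrative lookup of a probed VFE by index (sketch only, not part
+ * of this driver):
+ *
+ *	struct cam_hw_intf *vfe_intf = NULL;
+ *
+ *	if (cam_vfe_hw_init(&vfe_intf, 0))
+ *		return -ENODEV;		(no VFE probed at index 0)
+ *	vfe_intf->hw_ops.init(vfe_intf->hw_priv, NULL, 0);
+ */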
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
new file mode 100644
index 0000000..126580b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_dev.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_DEV_H_
+#define _CAM_VFE_DEV_H_
+
+#include <linux/platform_device.h>
+
+/*
+ * cam_vfe_probe()
+ *
+ * @brief:                   Driver probe function called at boot
+ *
+ * @pdev:                    Platform Device pointer
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_probe(struct platform_device *pdev);
+
+/*
+ * cam_vfe_remove()
+ *
+ * @brief:                   Driver remove function
+ *
+ * @pdev:                    Platform Device pointer
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_remove(struct platform_device *pdev);
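+
+/*
+ * These entry points are expected to be wired up through a
+ * platform_driver elsewhere in the VFE module; a hypothetical sketch
+ * (driver name and match table below are illustrative, not defined
+ * here):
+ *
+ *	static struct platform_driver cam_vfe_driver = {
+ *		.probe  = cam_vfe_probe,
+ *		.remove = cam_vfe_remove,
+ *		.driver = {
+ *			.name           = "cam_vfe17x",
+ *			.of_match_table = cam_vfe_dt_match,
+ *		},
+ *	};
+ */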
+
+#endif /* _CAM_VFE_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
new file mode 100644
index 0000000..8007815
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+#include "cam_debug_util.h"
+
+static bool cam_vfe_cpas_cb(uint32_t client_handle, void *userdata,
+	struct cam_cpas_irq_data *irq_data)
+{
+	bool error_handled = false;
+
+	if (!irq_data)
+		return error_handled;
+
+	switch (irq_data->irq_type) {
+	case CAM_CAMNOC_IRQ_IFE02_UBWC_ENCODE_ERROR:
+	case CAM_CAMNOC_IRQ_IFE13_UBWC_ENCODE_ERROR:
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IFE UBWC Encode error type=%d status=%x",
+			irq_data->irq_type,
+			irq_data->u.enc_err.encerr_status.value);
+		error_handled = true;
+		break;
+	default:
+		break;
+	}
+
+	return error_handled;
+}
+
+static int cam_vfe_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! get DT properties failed rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int cam_vfe_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info, vfe_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Error! Request platform resource failed rc=%d", rc);
+
+	return rc;
+}
+
+static int cam_vfe_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Error! Release platform resource failed rc=%d", rc);
+
+	return rc;
+}
+
+int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data)
+{
+	int                               rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+	struct cam_cpas_register_params   cpas_register_param;
+
+	soc_private = kzalloc(sizeof(struct cam_vfe_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		CAM_DBG(CAM_ISP, "Error! soc_private Alloc Failed");
+		return -ENOMEM;
+	}
+	soc_info->soc_private = soc_private;
+
+	rc = cam_vfe_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP, "Error! Get DT properties failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	rc = cam_soc_util_get_option_clk_by_name(soc_info,
+		CAM_VFE_DSP_CLK_NAME, &soc_private->dsp_clk,
+		&soc_private->dsp_clk_index, &soc_private->dsp_clk_rate);
+	if (rc)
+		CAM_WARN(CAM_ISP, "option clk get failed");
+
+	rc = cam_vfe_request_platform_resource(soc_info, vfe_irq_handler,
+		irq_data);
+	if (rc < 0) {
+		CAM_ERR(CAM_ISP,
+			"Error! Request platform resources failed rc=%d", rc);
+		goto free_soc_private;
+	}
+
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier, "ife",
+		CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = soc_info->dev;
+	cpas_register_param.cam_cpas_client_cb = cam_vfe_cpas_cb;
+	cpas_register_param.userdata = soc_info;
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "CPAS registration failed rc=%d", rc);
+		goto release_soc;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+
+	return rc;
+
+release_soc:
+	cam_soc_util_release_platform_resource(soc_info);
+free_soc_private:
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int                               rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! soc_info NULL");
+		return -ENODEV;
+	}
+
+	soc_private = soc_info->soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error! soc_private NULL");
+		return -ENODEV;
+	}
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_ISP, "CPAS unregistration failed rc=%d", rc);
+
+	rc = cam_vfe_release_platform_resource(soc_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP,
+			"Error! Release platform resources failed rc=%d", rc);
+
+	rc = cam_soc_util_clk_put(&soc_private->dsp_clk);
+	if (rc < 0)
+		CAM_ERR(CAM_ISP,
+			"Error! Put dsp clk failed rc=%d", rc);
+
+	kfree(soc_private);
+
+	return rc;
+}
+
+int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int                               rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+	struct cam_ahb_vote               ahb_vote;
+	struct cam_axi_vote               axi_vote;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
+		rc = -EINVAL;
+		goto end;
+	}
+	soc_private = soc_info->soc_private;
+
+	ahb_vote.type       = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+
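+	/*
+	 * Fixed worst-case bandwidth vote (~10.64 GB/s); presumably the
+	 * ISP hw_mgr re-votes with use-case specific numbers once
+	 * streaming is configured.
+	 */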
+	axi_vote.compressed_bw   = 10640000000L;
+	axi_vote.uncompressed_bw = 10640000000L;
+
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! CPAS start failed rc=%d", rc);
+		rc = -EFAULT;
+		goto end;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_TURBO_VOTE, true);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! enable platform failed rc=%d", rc);
+		goto stop_cpas;
+	}
+
+	return rc;
+
+stop_cpas:
+	cam_cpas_stop(soc_private->cpas_handle);
+end:
+	return rc;
+}
+
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name)
+{
+	int  rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+		rc = cam_soc_util_clk_enable(soc_private->dsp_clk,
+			CAM_VFE_DSP_CLK_NAME, soc_private->dsp_clk_rate);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+			"Error enable dsp clk failed rc=%d", rc);
+	}
+
+	return rc;
+}
+
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name)
+{
+	int  rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	if (strcmp(clk_name, CAM_VFE_DSP_CLK_NAME) == 0) {
+		rc = cam_soc_util_clk_disable(soc_private->dsp_clk,
+			CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+			"Error disable dsp clk failed rc=%d", rc);
+	}
+
+	return rc;
+}
+
+int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0;
+	struct cam_vfe_soc_private       *soc_private;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid params");
+		rc = -EINVAL;
+		return rc;
+	}
+	soc_private = soc_info->soc_private;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Disable platform failed rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Error! CPAS stop failed rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
new file mode 100644
index 0000000..2601734
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_soc.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_SOC_H_
+#define _CAM_VFE_SOC_H_
+
+#include "cam_soc_util.h"
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_DSP_CLK_NAME "ife_dsp_clk"
+
+/*
+ * struct cam_vfe_soc_private:
+ *
+ * @brief:                   Private SOC data specific to VFE HW Driver
+ *
+ * @cpas_handle:             Handle returned on registering with CPAS driver.
+ *                           This handle is used for all further interface
+ *                           with CPAS.
+ * @dsp_clk:                 Optional DSP clock for this VFE core
+ * @dsp_clk_index:           Index of the DSP clock in the SOC clock list
+ * @dsp_clk_rate:            Rate at which to run the DSP clock when enabled
+ */
+struct cam_vfe_soc_private {
+	uint32_t    cpas_handle;
+	struct clk *dsp_clk;
+	int32_t     dsp_clk_index;
+	int32_t     dsp_clk_rate;
+};
+
+/*
+ * cam_vfe_init_soc_resources()
+ *
+ * @brief:                   Initialize SOC resources including private data
+ *
+ * @soc_info:                Device soc information
+ * @vfe_irq_handler:         IRQ handler function pointer
+ * @irq_data:                Callback data passed to the IRQ handler
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t vfe_irq_handler, void *irq_data);
+
+/*
+ * cam_vfe_deinit_soc_resources()
+ *
+ * @brief:                   Deinitialize SOC resources including private data
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_deinit_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_vfe_enable_soc_resources()
+ *
+ * @brief:                   Enable regulator, irq resources, start CPAS
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_vfe_disable_soc_resources()
+ *
+ * @brief:                   Disable regulator, irq resources, stop CPAS
+ *
+ * @soc_info:                Device soc information
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+/*
+ * cam_vfe_soc_enable_clk()
+ *
+ * @brief:                   Enable clock with given name
+ *
+ * @soc_info:                Device soc information
+ * @clk_name:                Name of clock to enable
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_soc_enable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name);
+
+/*
+ * cam_vfe_soc_disable_clk()
+ *
+ * @brief:                   Disable clock with given name
+ *
+ * @soc_info:                Device soc information
+ * @clk_name:                Name of clock to disable
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_soc_disable_clk(struct cam_hw_soc_info *soc_info,
+	const char *clk_name);
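+
+/*
+ * Expected call ordering for the helpers above (illustrative sketch):
+ *
+ *	cam_vfe_init_soc_resources()     - once, at probe
+ *	cam_vfe_enable_soc_resources()   - on hw init/start
+ *	cam_vfe_soc_enable_clk()         - optional, DSP use cases only
+ *	cam_vfe_soc_disable_clk()
+ *	cam_vfe_disable_soc_resources()  - on hw deinit/stop
+ *	cam_vfe_deinit_soc_resources()   - once, at remove
+ */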
+
+#endif /* _CAM_VFE_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile
new file mode 100644
index 0000000..8476a11
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe17x.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
new file mode 100644
index 0000000..c729b5b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe170.h
@@ -0,0 +1,843 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE170_H_
+#define _CAM_VFE170_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+static struct cam_irq_register_set vfe170_top_irq_reg_set[2] = {
+	{
+		.mask_reg_offset   = 0x0000005C,
+		.clear_reg_offset  = 0x00000064,
+		.status_reg_offset = 0x0000006C,
+	},
+	{
+		.mask_reg_offset   = 0x00000060,
+		.clear_reg_offset  = 0x00000068,
+		.status_reg_offset = 0x00000070,
+	},
+};
+
+static struct cam_irq_controller_reg_info vfe170_top_irq_reg_info = {
+	.num_registers = 2,
+	.irq_reg_set = vfe170_top_irq_reg_set,
+	.global_clear_offset  = 0x00000058,
+	.global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_camif_ver2_reg vfe170_camif_reg = {
+	.camif_cmd                = 0x00000478,
+	.camif_config             = 0x0000047C,
+	.line_skip_pattern        = 0x00000488,
+	.pixel_skip_pattern       = 0x0000048C,
+	.skip_period              = 0x00000490,
+	.irq_subsample_pattern    = 0x0000049C,
+	.epoch_irq                = 0x000004A0,
+	.raw_crop_width_cfg       = 0x00000CE4,
+	.raw_crop_height_cfg      = 0x00000CE8,
+	.reg_update_cmd           = 0x000004AC,
+	.vfe_diag_config          = 0x00000C48,
+	.vfe_diag_sensor_status   = 0x00000C4C,
+};
+
+static struct cam_vfe_camif_reg_data vfe_170_camif_reg_data = {
+	.raw_crop_first_pixel_shift      = 16,
+	.raw_crop_first_pixel_mask       = 0xFFFF,
+	.raw_crop_last_pixel_shift       = 0x0,
+	.raw_crop_last_pixel_mask        = 0x3FFF,
+	.raw_crop_first_line_shift       = 16,
+	.raw_crop_first_line_mask        = 0xFFFF,
+	.raw_crop_last_line_shift        = 0,
+	.raw_crop_last_line_mask         = 0x3FFF,
+	.input_mux_sel_shift             = 5,
+	.input_mux_sel_mask              = 0x3,
+	.extern_reg_update_shift         = 4,
+	.extern_reg_update_mask          = 1,
+	.pixel_pattern_shift             = 0,
+	.pixel_pattern_mask              = 0x7,
+	.dsp_mode_shift                  = 23,
+	.dsp_mode_mask                   = 0x1,
+	.dsp_en_shift                    = 3,
+	.dsp_en_mask                     = 0x1,
+	.reg_update_cmd_data             = 0x1,
+	.epoch_line_cfg                  = 0x00140014,
+	.sof_irq_mask                    = 0x00000001,
+	.epoch0_irq_mask                 = 0x00000004,
+	.reg_update_irq_mask             = 0x00000010,
+	.eof_irq_mask                    = 0x00000002,
+	.error_irq_mask0                 = 0x0003FC00,
+	.error_irq_mask1                 = 0x0FFF7E80,
+	.enable_diagnostic_hw            = 0x1,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_170_reg = {
+	.reset    = 0x0000001C,
+	.cgc_ovd  = 0x0000002C,
+	.enable   = 0x00000040,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_170_reg = {
+	.reset    = 0x00000020,
+	.cgc_ovd  = 0x00000030,
+	.enable   = 0x00000044,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl color_170_reg = {
+	.reset    = 0x00000024,
+	.cgc_ovd  = 0x00000034,
+	.enable   = 0x00000048,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_170_reg = {
+	.reset    = 0x00000028,
+	.cgc_ovd  = 0x00000038,
+	.enable   = 0x0000004C,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_common vfe170_top_common_reg = {
+	.hw_version               = 0x00000000,
+	.hw_capability            = 0x00000004,
+	.lens_feature             = 0x00000008,
+	.stats_feature            = 0x0000000C,
+	.color_feature            = 0x00000010,
+	.zoom_feature             = 0x00000014,
+	.global_reset_cmd         = 0x00000018,
+	.module_ctrl              = {
+		&lens_170_reg,
+		&stats_170_reg,
+		&color_170_reg,
+		&zoom_170_reg,
+	},
+	.bus_cgc_ovd              = 0x0000003C,
+	.core_cfg                 = 0x00000050,
+	.three_D_cfg              = 0x00000054,
+	.violation_status         = 0x0000007C,
+	.reg_update_cmd           = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe170_rdi_reg = {
+	.reg_update_cmd           = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_170_rdi_0_data = {
+	.reg_update_cmd_data      = 0x2,
+	.sof_irq_mask             = 0x8000000,
+	.reg_update_irq_mask      = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_170_rdi_1_data = {
+	.reg_update_cmd_data      = 0x4,
+	.sof_irq_mask             = 0x10000000,
+	.reg_update_irq_mask      = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_170_rdi_2_data = {
+	.reg_update_cmd_data      = 0x8,
+	.sof_irq_mask             = 0x20000000,
+	.reg_update_irq_mask      = 0x80,
+};
+
+static struct cam_vfe_top_ver2_hw_info vfe170_top_hw_info = {
+	.common_reg = &vfe170_top_common_reg,
+	.camif_hw_info = {
+		.common_reg     = &vfe170_top_common_reg,
+		.camif_reg      = &vfe170_camif_reg,
+		.reg_data       = &vfe_170_camif_reg_data,
+		},
+	.camif_lite_hw_info = {
+		.common_reg     = NULL,
+		.camif_lite_reg = NULL,
+		.reg_data       = NULL,
+		},
+	.rdi_hw_info = {
+		.common_reg = &vfe170_top_common_reg,
+		.rdi_reg    = &vfe170_rdi_reg,
+		.reg_data = {
+			&vfe_170_rdi_0_data,
+			&vfe_170_rdi_1_data,
+			&vfe_170_rdi_2_data,
+			NULL,
+			},
+		},
+	.mux_type = {
+		CAM_VFE_CAMIF_VER_2_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+	},
+};
+
+static struct cam_irq_register_set vfe170_bus_irq_reg[3] = {
+		{
+			.mask_reg_offset   = 0x00002044,
+			.clear_reg_offset  = 0x00002050,
+			.status_reg_offset = 0x0000205C,
+		},
+		{
+			.mask_reg_offset   = 0x00002048,
+			.clear_reg_offset  = 0x00002054,
+			.status_reg_offset = 0x00002060,
+		},
+		{
+			.mask_reg_offset   = 0x0000204C,
+			.clear_reg_offset  = 0x00002058,
+			.status_reg_offset = 0x00002064,
+		},
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_3 = {
+	.tile_cfg         = 0x0000252C,
+	.h_init           = 0x00002530,
+	.v_init           = 0x00002534,
+	.meta_addr        = 0x00002538,
+	.meta_offset      = 0x0000253C,
+	.meta_stride      = 0x00002540,
+	.mode_cfg_0       = 0x00002544,
+	.bw_limit         = 0x000025A0,
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_client ubwc_regs_client_4 = {
+	.tile_cfg         = 0x0000262C,
+	.h_init           = 0x00002630,
+	.v_init           = 0x00002634,
+	.meta_addr        = 0x00002638,
+	.meta_offset      = 0x0000263C,
+	.meta_stride      = 0x00002640,
+	.mode_cfg_0       = 0x00002644,
+	.bw_limit         = 0x000026A0,
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe170_bus_hw_info = {
+	.common_reg = {
+		.hw_version                   = 0x00002000,
+		.hw_capability                = 0x00002004,
+		.sw_reset                     = 0x00002008,
+		.cgc_ovd                      = 0x0000200C,
+		.pwr_iso_cfg                  = 0x000020CC,
+		.dual_master_comp_cfg         = 0x00002028,
+		.irq_reg_info = {
+			.num_registers        = 3,
+			.irq_reg_set          = vfe170_bus_irq_reg,
+			.global_clear_offset  = 0x00002068,
+			.global_clear_bitmask = 0x00000001,
+		},
+		.comp_error_status            = 0x0000206C,
+		.comp_ovrwr_status            = 0x00002070,
+		.dual_comp_error_status       = 0x00002074,
+		.dual_comp_ovrwr_status       = 0x00002078,
+		.addr_sync_cfg                = 0x0000207C,
+		.addr_sync_frame_hdr          = 0x00002080,
+		.addr_sync_no_sync            = 0x00002084,
+		.debug_status_cfg             = 0x0000226C,
+		.debug_status_0               = 0x00002270,
+	},
+	.num_client = 20,
+	.bus_client_reg = {
+		/* BUS Client 0 */
+		{
+			.status0                  = 0x00002200,
+			.status1                  = 0x00002204,
+			.cfg                      = 0x00002208,
+			.header_addr              = 0x0000220C,
+			.header_cfg               = 0x00002210,
+			.image_addr               = 0x00002214,
+			.image_addr_offset        = 0x00002218,
+			.buffer_width_cfg         = 0x0000221C,
+			.buffer_height_cfg        = 0x00002220,
+			.packer_cfg               = 0x00002224,
+			.stride                   = 0x00002228,
+			.irq_subsample_period     = 0x00002248,
+			.irq_subsample_pattern    = 0x0000224C,
+			.framedrop_period         = 0x00002250,
+			.framedrop_pattern        = 0x00002254,
+			.frame_inc                = 0x00002258,
+			.burst_limit              = 0x0000225C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 1 */
+		{
+			.status0                  = 0x00002300,
+			.status1                  = 0x00002304,
+			.cfg                      = 0x00002308,
+			.header_addr              = 0x0000230C,
+			.header_cfg               = 0x00002310,
+			.image_addr               = 0x00002314,
+			.image_addr_offset        = 0x00002318,
+			.buffer_width_cfg         = 0x0000231C,
+			.buffer_height_cfg        = 0x00002320,
+			.packer_cfg               = 0x00002324,
+			.stride                   = 0x00002328,
+			.irq_subsample_period     = 0x00002348,
+			.irq_subsample_pattern    = 0x0000234C,
+			.framedrop_period         = 0x00002350,
+			.framedrop_pattern        = 0x00002354,
+			.frame_inc                = 0x00002358,
+			.burst_limit              = 0x0000235C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 2 */
+		{
+			.status0                  = 0x00002400,
+			.status1                  = 0x00002404,
+			.cfg                      = 0x00002408,
+			.header_addr              = 0x0000240C,
+			.header_cfg               = 0x00002410,
+			.image_addr               = 0x00002414,
+			.image_addr_offset        = 0x00002418,
+			.buffer_width_cfg         = 0x0000241C,
+			.buffer_height_cfg        = 0x00002420,
+			.packer_cfg               = 0x00002424,
+			.stride                   = 0x00002428,
+			.irq_subsample_period     = 0x00002448,
+			.irq_subsample_pattern    = 0x0000244C,
+			.framedrop_period         = 0x00002450,
+			.framedrop_pattern        = 0x00002454,
+			.frame_inc                = 0x00002458,
+			.burst_limit              = 0x0000245C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 3 */
+		{
+			.status0                  = 0x00002500,
+			.status1                  = 0x00002504,
+			.cfg                      = 0x00002508,
+			.header_addr              = 0x0000250C,
+			.header_cfg               = 0x00002510,
+			.image_addr               = 0x00002514,
+			.image_addr_offset        = 0x00002518,
+			.buffer_width_cfg         = 0x0000251C,
+			.buffer_height_cfg        = 0x00002520,
+			.packer_cfg               = 0x00002524,
+			.stride                   = 0x00002528,
+			.irq_subsample_period     = 0x00002548,
+			.irq_subsample_pattern    = 0x0000254C,
+			.framedrop_period         = 0x00002550,
+			.framedrop_pattern        = 0x00002554,
+			.frame_inc                = 0x00002558,
+			.burst_limit              = 0x0000255C,
+			.ubwc_regs                = &ubwc_regs_client_3,
+		},
+		/* BUS Client 4 */
+		{
+			.status0                  = 0x00002600,
+			.status1                  = 0x00002604,
+			.cfg                      = 0x00002608,
+			.header_addr              = 0x0000260C,
+			.header_cfg               = 0x00002610,
+			.image_addr               = 0x00002614,
+			.image_addr_offset        = 0x00002618,
+			.buffer_width_cfg         = 0x0000261C,
+			.buffer_height_cfg        = 0x00002620,
+			.packer_cfg               = 0x00002624,
+			.stride                   = 0x00002628,
+			.irq_subsample_period     = 0x00002648,
+			.irq_subsample_pattern    = 0x0000264C,
+			.framedrop_period         = 0x00002650,
+			.framedrop_pattern        = 0x00002654,
+			.frame_inc                = 0x00002658,
+			.burst_limit              = 0x0000265C,
+			.ubwc_regs                = &ubwc_regs_client_4,
+		},
+		/* BUS Client 5 */
+		{
+			.status0                  = 0x00002700,
+			.status1                  = 0x00002704,
+			.cfg                      = 0x00002708,
+			.header_addr              = 0x0000270C,
+			.header_cfg               = 0x00002710,
+			.image_addr               = 0x00002714,
+			.image_addr_offset        = 0x00002718,
+			.buffer_width_cfg         = 0x0000271C,
+			.buffer_height_cfg        = 0x00002720,
+			.packer_cfg               = 0x00002724,
+			.stride                   = 0x00002728,
+			.irq_subsample_period     = 0x00002748,
+			.irq_subsample_pattern    = 0x0000274C,
+			.framedrop_period         = 0x00002750,
+			.framedrop_pattern        = 0x00002754,
+			.frame_inc                = 0x00002758,
+			.burst_limit              = 0x0000275C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 6 */
+		{
+			.status0                  = 0x00002800,
+			.status1                  = 0x00002804,
+			.cfg                      = 0x00002808,
+			.header_addr              = 0x0000280C,
+			.header_cfg               = 0x00002810,
+			.image_addr               = 0x00002814,
+			.image_addr_offset        = 0x00002818,
+			.buffer_width_cfg         = 0x0000281C,
+			.buffer_height_cfg        = 0x00002820,
+			.packer_cfg               = 0x00002824,
+			.stride                   = 0x00002828,
+			.irq_subsample_period     = 0x00002848,
+			.irq_subsample_pattern    = 0x0000284C,
+			.framedrop_period         = 0x00002850,
+			.framedrop_pattern        = 0x00002854,
+			.frame_inc                = 0x00002858,
+			.burst_limit              = 0x0000285C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 7 */
+		{
+			.status0                  = 0x00002900,
+			.status1                  = 0x00002904,
+			.cfg                      = 0x00002908,
+			.header_addr              = 0x0000290C,
+			.header_cfg               = 0x00002910,
+			.image_addr               = 0x00002914,
+			.image_addr_offset        = 0x00002918,
+			.buffer_width_cfg         = 0x0000291C,
+			.buffer_height_cfg        = 0x00002920,
+			.packer_cfg               = 0x00002924,
+			.stride                   = 0x00002928,
+			.irq_subsample_period     = 0x00002948,
+			.irq_subsample_pattern    = 0x0000294C,
+			.framedrop_period         = 0x00002950,
+			.framedrop_pattern        = 0x00002954,
+			.frame_inc                = 0x00002958,
+			.burst_limit              = 0x0000295C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 8 */
+		{
+			.status0                  = 0x00002A00,
+			.status1                  = 0x00002A04,
+			.cfg                      = 0x00002A08,
+			.header_addr              = 0x00002A0C,
+			.header_cfg               = 0x00002A10,
+			.image_addr               = 0x00002A14,
+			.image_addr_offset        = 0x00002A18,
+			.buffer_width_cfg         = 0x00002A1C,
+			.buffer_height_cfg        = 0x00002A20,
+			.packer_cfg               = 0x00002A24,
+			.stride                   = 0x00002A28,
+			.irq_subsample_period     = 0x00002A48,
+			.irq_subsample_pattern    = 0x00002A4C,
+			.framedrop_period         = 0x00002A50,
+			.framedrop_pattern        = 0x00002A54,
+			.frame_inc                = 0x00002A58,
+			.burst_limit              = 0x00002A5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 9 */
+		{
+			.status0                  = 0x00002B00,
+			.status1                  = 0x00002B04,
+			.cfg                      = 0x00002B08,
+			.header_addr              = 0x00002B0C,
+			.header_cfg               = 0x00002B10,
+			.image_addr               = 0x00002B14,
+			.image_addr_offset        = 0x00002B18,
+			.buffer_width_cfg         = 0x00002B1C,
+			.buffer_height_cfg        = 0x00002B20,
+			.packer_cfg               = 0x00002B24,
+			.stride                   = 0x00002B28,
+			.irq_subsample_period     = 0x00002B48,
+			.irq_subsample_pattern    = 0x00002B4C,
+			.framedrop_period         = 0x00002B50,
+			.framedrop_pattern        = 0x00002B54,
+			.frame_inc                = 0x00002B58,
+			.burst_limit              = 0x00002B5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 10 */
+		{
+			.status0                  = 0x00002C00,
+			.status1                  = 0x00002C04,
+			.cfg                      = 0x00002C08,
+			.header_addr              = 0x00002C0C,
+			.header_cfg               = 0x00002C10,
+			.image_addr               = 0x00002C14,
+			.image_addr_offset        = 0x00002C18,
+			.buffer_width_cfg         = 0x00002C1C,
+			.buffer_height_cfg        = 0x00002C20,
+			.packer_cfg               = 0x00002C24,
+			.stride                   = 0x00002C28,
+			.irq_subsample_period     = 0x00002C48,
+			.irq_subsample_pattern    = 0x00002C4C,
+			.framedrop_period         = 0x00002C50,
+			.framedrop_pattern        = 0x00002C54,
+			.frame_inc                = 0x00002C58,
+			.burst_limit              = 0x00002C5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 11 */
+		{
+			.status0                  = 0x00002D00,
+			.status1                  = 0x00002D04,
+			.cfg                      = 0x00002D08,
+			.header_addr              = 0x00002D0C,
+			.header_cfg               = 0x00002D10,
+			.image_addr               = 0x00002D14,
+			.image_addr_offset        = 0x00002D18,
+			.buffer_width_cfg         = 0x00002D1C,
+			.buffer_height_cfg        = 0x00002D20,
+			.packer_cfg               = 0x00002D24,
+			.stride                   = 0x00002D28,
+			.irq_subsample_period     = 0x00002D48,
+			.irq_subsample_pattern    = 0x00002D4C,
+			.framedrop_period         = 0x00002D50,
+			.framedrop_pattern        = 0x00002D54,
+			.frame_inc                = 0x00002D58,
+			.burst_limit              = 0x00002D5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 12 */
+		{
+			.status0                  = 0x00002E00,
+			.status1                  = 0x00002E04,
+			.cfg                      = 0x00002E08,
+			.header_addr              = 0x00002E0C,
+			.header_cfg               = 0x00002E10,
+			.image_addr               = 0x00002E14,
+			.image_addr_offset        = 0x00002E18,
+			.buffer_width_cfg         = 0x00002E1C,
+			.buffer_height_cfg        = 0x00002E20,
+			.packer_cfg               = 0x00002E24,
+			.stride                   = 0x00002E28,
+			.irq_subsample_period     = 0x00002E48,
+			.irq_subsample_pattern    = 0x00002E4C,
+			.framedrop_period         = 0x00002E50,
+			.framedrop_pattern        = 0x00002E54,
+			.frame_inc                = 0x00002E58,
+			.burst_limit              = 0x00002E5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 13 */
+		{
+			.status0                  = 0x00002F00,
+			.status1                  = 0x00002F04,
+			.cfg                      = 0x00002F08,
+			.header_addr              = 0x00002F0C,
+			.header_cfg               = 0x00002F10,
+			.image_addr               = 0x00002F14,
+			.image_addr_offset        = 0x00002F18,
+			.buffer_width_cfg         = 0x00002F1C,
+			.buffer_height_cfg        = 0x00002F20,
+			.packer_cfg               = 0x00002F24,
+			.stride                   = 0x00002F28,
+			.irq_subsample_period     = 0x00002F48,
+			.irq_subsample_pattern    = 0x00002F4C,
+			.framedrop_period         = 0x00002F50,
+			.framedrop_pattern        = 0x00002F54,
+			.frame_inc                = 0x00002F58,
+			.burst_limit              = 0x00002F5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 14 */
+		{
+			.status0                  = 0x00003000,
+			.status1                  = 0x00003004,
+			.cfg                      = 0x00003008,
+			.header_addr              = 0x0000300C,
+			.header_cfg               = 0x00003010,
+			.image_addr               = 0x00003014,
+			.image_addr_offset        = 0x00003018,
+			.buffer_width_cfg         = 0x0000301C,
+			.buffer_height_cfg        = 0x00003020,
+			.packer_cfg               = 0x00003024,
+			.stride                   = 0x00003028,
+			.irq_subsample_period     = 0x00003048,
+			.irq_subsample_pattern    = 0x0000304C,
+			.framedrop_period         = 0x00003050,
+			.framedrop_pattern        = 0x00003054,
+			.frame_inc                = 0x00003058,
+			.burst_limit              = 0x0000305C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 15 */
+		{
+			.status0                  = 0x00003100,
+			.status1                  = 0x00003104,
+			.cfg                      = 0x00003108,
+			.header_addr              = 0x0000310C,
+			.header_cfg               = 0x00003110,
+			.image_addr               = 0x00003114,
+			.image_addr_offset        = 0x00003118,
+			.buffer_width_cfg         = 0x0000311C,
+			.buffer_height_cfg        = 0x00003120,
+			.packer_cfg               = 0x00003124,
+			.stride                   = 0x00003128,
+			.irq_subsample_period     = 0x00003148,
+			.irq_subsample_pattern    = 0x0000314C,
+			.framedrop_period         = 0x00003150,
+			.framedrop_pattern        = 0x00003154,
+			.frame_inc                = 0x00003158,
+			.burst_limit              = 0x0000315C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 16 */
+		{
+			.status0                  = 0x00003200,
+			.status1                  = 0x00003204,
+			.cfg                      = 0x00003208,
+			.header_addr              = 0x0000320C,
+			.header_cfg               = 0x00003210,
+			.image_addr               = 0x00003214,
+			.image_addr_offset        = 0x00003218,
+			.buffer_width_cfg         = 0x0000321C,
+			.buffer_height_cfg        = 0x00003220,
+			.packer_cfg               = 0x00003224,
+			.stride                   = 0x00003228,
+			.irq_subsample_period     = 0x00003248,
+			.irq_subsample_pattern    = 0x0000324C,
+			.framedrop_period         = 0x00003250,
+			.framedrop_pattern        = 0x00003254,
+			.frame_inc                = 0x00003258,
+			.burst_limit              = 0x0000325C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 17 */
+		{
+			.status0                  = 0x00003300,
+			.status1                  = 0x00003304,
+			.cfg                      = 0x00003308,
+			.header_addr              = 0x0000330C,
+			.header_cfg               = 0x00003310,
+			.image_addr               = 0x00003314,
+			.image_addr_offset        = 0x00003318,
+			.buffer_width_cfg         = 0x0000331C,
+			.buffer_height_cfg        = 0x00003320,
+			.packer_cfg               = 0x00003324,
+			.stride                   = 0x00003328,
+			.irq_subsample_period     = 0x00003348,
+			.irq_subsample_pattern    = 0x0000334C,
+			.framedrop_period         = 0x00003350,
+			.framedrop_pattern        = 0x00003354,
+			.frame_inc                = 0x00003358,
+			.burst_limit              = 0x0000335C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 18 */
+		{
+			.status0                  = 0x00003400,
+			.status1                  = 0x00003404,
+			.cfg                      = 0x00003408,
+			.header_addr              = 0x0000340C,
+			.header_cfg               = 0x00003410,
+			.image_addr               = 0x00003414,
+			.image_addr_offset        = 0x00003418,
+			.buffer_width_cfg         = 0x0000341C,
+			.buffer_height_cfg        = 0x00003420,
+			.packer_cfg               = 0x00003424,
+			.stride                   = 0x00003428,
+			.irq_subsample_period     = 0x00003448,
+			.irq_subsample_pattern    = 0x0000344C,
+			.framedrop_period         = 0x00003450,
+			.framedrop_pattern        = 0x00003454,
+			.frame_inc                = 0x00003458,
+			.burst_limit              = 0x0000345C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 19 */
+		{
+			.status0                  = 0x00003500,
+			.status1                  = 0x00003504,
+			.cfg                      = 0x00003508,
+			.header_addr              = 0x0000350C,
+			.header_cfg               = 0x00003510,
+			.image_addr               = 0x00003514,
+			.image_addr_offset        = 0x00003518,
+			.buffer_width_cfg         = 0x0000351C,
+			.buffer_height_cfg        = 0x00003520,
+			.packer_cfg               = 0x00003524,
+			.stride                   = 0x00003528,
+			.irq_subsample_period     = 0x00003548,
+			.irq_subsample_pattern    = 0x0000354C,
+			.framedrop_period         = 0x00003550,
+			.framedrop_pattern        = 0x00003554,
+			.frame_inc                = 0x00003558,
+			.burst_limit              = 0x0000355C,
+			.ubwc_regs                = NULL,
+		},
+	},
+	.comp_grp_reg = {
+		/* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+		{
+			.comp_mask                    = 0x00002010,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+		{
+			.comp_mask                    = 0x00002014,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+		{
+			.comp_mask                    = 0x00002018,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+		{
+			.comp_mask                    = 0x0000201C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+		{
+			.comp_mask                    = 0x00002020,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+		{
+			.comp_mask                    = 0x00002024,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+		{
+			.comp_mask                    = 0x0000202C,
+			.addr_sync_mask               = 0x00002088,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+		{
+			.comp_mask                    = 0x00002030,
+			.addr_sync_mask               = 0x0000208C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+		{
+			.comp_mask                    = 0x00002034,
+			.addr_sync_mask               = 0x00002090,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+		{
+			.comp_mask                    = 0x00002038,
+			.addr_sync_mask               = 0x00002094,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+		{
+			.comp_mask                    = 0x0000203C,
+			.addr_sync_mask               = 0x00002098,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+		{
+			.comp_mask                    = 0x00002040,
+			.addr_sync_mask               = 0x0000209C,
+		},
+	},
+	.num_out = 18,
+	.vfe_out_hw_info = {
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+			.max_width     = 4096,
+			.max_height    = 4096,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_FD,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+	},
+};
+
+struct cam_vfe_hw_info cam_vfe170_hw_info = {
+	.irq_reg_info                  = &vfe170_top_irq_reg_info,
+
+	.bus_version                   = CAM_VFE_BUS_VER_2_0,
+	.bus_hw_info                   = &vfe170_bus_hw_info,
+
+	.top_version                   = CAM_VFE_TOP_VER_2_0,
+	.top_hw_info                   = &vfe170_top_hw_info,
+
+	.camif_version                 = CAM_VFE_CAMIF_VER_2_0,
+	.camif_reg                     = &vfe170_camif_reg,
+
+	.camif_lite_version            = 0,
+	.camif_lite_reg                = NULL,
+};
+
+#endif /* _CAM_VFE170_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
new file mode 100644
index 0000000..4d71d20
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe175.h
@@ -0,0 +1,999 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE175_H_
+#define _CAM_VFE175_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_camif_lite_ver2.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+static struct cam_irq_register_set vfe175_top_irq_reg_set[2] = {
+	{
+		.mask_reg_offset   = 0x0000005C,
+		.clear_reg_offset  = 0x00000064,
+		.status_reg_offset = 0x0000006C,
+	},
+	{
+		.mask_reg_offset   = 0x00000060,
+		.clear_reg_offset  = 0x00000068,
+		.status_reg_offset = 0x00000070,
+	},
+};
+
+static struct cam_irq_controller_reg_info vfe175_top_irq_reg_info = {
+	.num_registers = 2,
+	.irq_reg_set = vfe175_top_irq_reg_set,
+	.global_clear_offset  = 0x00000058,
+	.global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_camif_ver2_reg vfe175_camif_reg = {
+	.camif_cmd                = 0x00000478,
+	.camif_config             = 0x0000047C,
+	.line_skip_pattern        = 0x00000488,
+	.pixel_skip_pattern       = 0x0000048C,
+	.skip_period              = 0x00000490,
+	.irq_subsample_pattern    = 0x0000049C,
+	.epoch_irq                = 0x000004A0,
+	.raw_crop_width_cfg       = 0x00000CE4,
+	.raw_crop_height_cfg      = 0x00000CE8,
+	.reg_update_cmd           = 0x000004AC,
+	.vfe_diag_config          = 0x00000C48,
+	.vfe_diag_sensor_status   = 0x00000C4C,
+};
+
+static struct cam_vfe_camif_reg_data vfe_175_camif_reg_data = {
+	.raw_crop_first_pixel_shift      = 16,
+	.raw_crop_first_pixel_mask       = 0xFFFF,
+	.raw_crop_last_pixel_shift       = 0x0,
+	.raw_crop_last_pixel_mask        = 0x3FFF,
+	.raw_crop_first_line_shift       = 16,
+	.raw_crop_first_line_mask        = 0xFFFF,
+	.raw_crop_last_line_shift        = 0,
+	.raw_crop_last_line_mask         = 0x3FFF,
+	.input_mux_sel_shift             = 5,
+	.input_mux_sel_mask              = 0x3,
+	.extern_reg_update_shift         = 4,
+	.extern_reg_update_mask          = 1,
+	.pixel_pattern_shift             = 0,
+	.pixel_pattern_mask              = 0x7,
+	.dsp_mode_shift                  = 23,
+	.dsp_mode_mask                   = 0x1,
+	.dsp_en_shift                    = 3,
+	.dsp_en_mask                     = 0x1,
+	.reg_update_cmd_data             = 0x1,
+	.epoch_line_cfg                  = 0x00140014,
+	.sof_irq_mask                    = 0x00000001,
+	.epoch0_irq_mask                 = 0x00000004,
+	.reg_update_irq_mask             = 0x00000010,
+	.eof_irq_mask                    = 0x00000002,
+	.error_irq_mask0                 = 0x0003FC00,
+	.error_irq_mask1                 = 0xEFFF7E80,
+	.enable_diagnostic_hw            = 0x1,
+};
+
+static struct cam_vfe_camif_lite_ver2_reg vfe175_camif_lite_reg = {
+	.camif_lite_cmd                = 0x00000FC0,
+	.camif_lite_config             = 0x00000FC4,
+	.lite_skip_period              = 0x00000FC8,
+	.lite_irq_subsample_pattern    = 0x00000FCC,
+	.lite_epoch_irq                = 0x00000FD0,
+	.reg_update_cmd                = 0x000004AC,
+};
+
+static struct cam_vfe_camif_lite_ver2_reg_data vfe175_camif_lite_reg_data = {
+	.dual_pd_reg_update_cmd_data     = 0x20,
+	.lite_epoch_line_cfg             = 0x00140014,
+	.lite_sof_irq_mask               = 0x00040000,
+	.lite_epoch0_irq_mask            = 0x00100000,
+	.dual_pd_reg_upd_irq_mask        = 0x04000000,
+	.lite_eof_irq_mask               = 0x00080000,
+	.lite_error_irq_mask0            = 0x00400000,
+	.lite_error_irq_mask1            = 0x00004100,
+	.extern_reg_update_shift         = 4,
+	.dual_pd_path_sel_shift          = 24,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl lens_175_reg = {
+	.reset    = 0x0000001C,
+	.cgc_ovd  = 0x0000002C,
+	.enable   = 0x00000040,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl stats_175_reg = {
+	.reset    = 0x00000020,
+	.cgc_ovd  = 0x00000030,
+	.enable   = 0x00000044,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl color_175_reg = {
+	.reset    = 0x00000024,
+	.cgc_ovd  = 0x00000034,
+	.enable   = 0x00000048,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_module_ctrl zoom_175_reg = {
+	.reset    = 0x00000028,
+	.cgc_ovd  = 0x00000038,
+	.enable   = 0x0000004C,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_common vfe175_top_common_reg = {
+	.hw_version               = 0x00000000,
+	.hw_capability            = 0x00000004,
+	.lens_feature             = 0x00000008,
+	.stats_feature            = 0x0000000C,
+	.color_feature            = 0x00000010,
+	.zoom_feature             = 0x00000014,
+	.global_reset_cmd         = 0x00000018,
+	.module_ctrl              = {
+		&lens_175_reg,
+		&stats_175_reg,
+		&color_175_reg,
+		&zoom_175_reg,
+	},
+	.bus_cgc_ovd              = 0x0000003C,
+	.core_cfg                 = 0x00000050,
+	.three_D_cfg              = 0x00000054,
+	.violation_status         = 0x0000007C,
+	.reg_update_cmd           = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe175_rdi_reg = {
+	.reg_update_cmd           = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_175_rdi_0_data = {
+	.reg_update_cmd_data      = 0x2,
+	.sof_irq_mask             = 0x8000000,
+	.reg_update_irq_mask      = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_175_rdi_1_data = {
+	.reg_update_cmd_data      = 0x4,
+	.sof_irq_mask             = 0x10000000,
+	.reg_update_irq_mask      = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe_175_rdi_2_data = {
+	.reg_update_cmd_data      = 0x8,
+	.sof_irq_mask             = 0x20000000,
+	.reg_update_irq_mask      = 0x80,
+};
+
+static struct cam_vfe_top_ver2_hw_info vfe175_top_hw_info = {
+	.common_reg = &vfe175_top_common_reg,
+	.camif_hw_info = {
+		.common_reg     = &vfe175_top_common_reg,
+		.camif_reg      = &vfe175_camif_reg,
+		.reg_data       = &vfe_175_camif_reg_data,
+		},
+	.camif_lite_hw_info = {
+		.common_reg     = &vfe175_top_common_reg,
+		.camif_lite_reg = &vfe175_camif_lite_reg,
+		.reg_data       = &vfe175_camif_lite_reg_data,
+		},
+	.rdi_hw_info = {
+		.common_reg = &vfe175_top_common_reg,
+		.rdi_reg    = &vfe175_rdi_reg,
+		.reg_data = {
+			&vfe_175_rdi_0_data,
+			&vfe_175_rdi_1_data,
+			&vfe_175_rdi_2_data,
+			NULL,
+			},
+		},
+	.mux_type = {
+		CAM_VFE_CAMIF_VER_2_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_CAMIF_LITE_VER_2_0,
+	},
+};
+
+static struct cam_irq_register_set vfe175_bus_irq_reg[3] = {
+		{
+			.mask_reg_offset   = 0x00002044,
+			.clear_reg_offset  = 0x00002050,
+			.status_reg_offset = 0x0000205C,
+		},
+		{
+			.mask_reg_offset   = 0x00002048,
+			.clear_reg_offset  = 0x00002054,
+			.status_reg_offset = 0x00002060,
+		},
+		{
+			.mask_reg_offset   = 0x0000204C,
+			.clear_reg_offset  = 0x00002058,
+			.status_reg_offset = 0x00002064,
+		},
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client
+	vfe175_ubwc_regs_client_3 = {
+	.tile_cfg         = 0x0000252C,
+	.h_init           = 0x00002530,
+	.v_init           = 0x00002534,
+	.meta_addr        = 0x00002538,
+	.meta_offset      = 0x0000253C,
+	.meta_stride      = 0x00002540,
+	.mode_cfg_0       = 0x00002544,
+	.mode_cfg_1       = 0x000025A4,
+	.bw_limit         = 0x000025A0,
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client
+	vfe175_ubwc_regs_client_4 = {
+	.tile_cfg         = 0x0000262C,
+	.h_init           = 0x00002630,
+	.v_init           = 0x00002634,
+	.meta_addr        = 0x00002638,
+	.meta_offset      = 0x0000263C,
+	.meta_stride      = 0x00002640,
+	.mode_cfg_0       = 0x00002644,
+	.mode_cfg_1       = 0x000026A4,
+	.bw_limit         = 0x000026A0,
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client
+	vfe175_ubwc_regs_client_20 = {
+	.tile_cfg         = 0x0000362C,
+	.h_init           = 0x00003630,
+	.v_init           = 0x00003634,
+	.meta_addr        = 0x00003638,
+	.meta_offset      = 0x0000363C,
+	.meta_stride      = 0x00003640,
+	.mode_cfg_0       = 0x00003644,
+	.mode_cfg_1       = 0x000036A4,
+	.bw_limit         = 0x000036A0,
+};
+
+static struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client
+	vfe175_ubwc_regs_client_21 = {
+	.tile_cfg         = 0x0000372C,
+	.h_init           = 0x00003730,
+	.v_init           = 0x00003734,
+	.meta_addr        = 0x00003738,
+	.meta_offset      = 0x0000373C,
+	.meta_stride      = 0x00003740,
+	.mode_cfg_0       = 0x00003744,
+	.mode_cfg_1       = 0x000037A4,
+	.bw_limit         = 0x000037A0,
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe175_bus_hw_info = {
+	.common_reg = {
+		.hw_version                   = 0x00002000,
+		.hw_capability                = 0x00002004,
+		.sw_reset                     = 0x00002008,
+		.cgc_ovd                      = 0x0000200C,
+		.pwr_iso_cfg                  = 0x000020CC,
+		.dual_master_comp_cfg         = 0x00002028,
+		.irq_reg_info = {
+			.num_registers        = 3,
+			.irq_reg_set          = vfe175_bus_irq_reg,
+			.global_clear_offset  = 0x00002068,
+			.global_clear_bitmask = 0x00000001,
+		},
+		.comp_error_status            = 0x0000206C,
+		.comp_ovrwr_status            = 0x00002070,
+		.dual_comp_error_status       = 0x00002074,
+		.dual_comp_ovrwr_status       = 0x00002078,
+		.addr_sync_cfg                = 0x0000207C,
+		.addr_sync_frame_hdr          = 0x00002080,
+		.addr_sync_no_sync            = 0x00002084,
+	},
+	.num_client = 24,
+	.bus_client_reg = {
+		/* BUS Client 0 */
+		{
+			.status0                  = 0x00002200,
+			.status1                  = 0x00002204,
+			.cfg                      = 0x00002208,
+			.header_addr              = 0x0000220C,
+			.header_cfg               = 0x00002210,
+			.image_addr               = 0x00002214,
+			.image_addr_offset        = 0x00002218,
+			.buffer_width_cfg         = 0x0000221C,
+			.buffer_height_cfg        = 0x00002220,
+			.packer_cfg               = 0x00002224,
+			.stride                   = 0x00002228,
+			.irq_subsample_period     = 0x00002248,
+			.irq_subsample_pattern    = 0x0000224C,
+			.framedrop_period         = 0x00002250,
+			.framedrop_pattern        = 0x00002254,
+			.frame_inc                = 0x00002258,
+			.burst_limit              = 0x0000225C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 1 */
+		{
+			.status0                  = 0x00002300,
+			.status1                  = 0x00002304,
+			.cfg                      = 0x00002308,
+			.header_addr              = 0x0000230C,
+			.header_cfg               = 0x00002310,
+			.image_addr               = 0x00002314,
+			.image_addr_offset        = 0x00002318,
+			.buffer_width_cfg         = 0x0000231C,
+			.buffer_height_cfg        = 0x00002320,
+			.packer_cfg               = 0x00002324,
+			.stride                   = 0x00002328,
+			.irq_subsample_period     = 0x00002348,
+			.irq_subsample_pattern    = 0x0000234C,
+			.framedrop_period         = 0x00002350,
+			.framedrop_pattern        = 0x00002354,
+			.frame_inc                = 0x00002358,
+			.burst_limit              = 0x0000235C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 2 */
+		{
+			.status0                  = 0x00002400,
+			.status1                  = 0x00002404,
+			.cfg                      = 0x00002408,
+			.header_addr              = 0x0000240C,
+			.header_cfg               = 0x00002410,
+			.image_addr               = 0x00002414,
+			.image_addr_offset        = 0x00002418,
+			.buffer_width_cfg         = 0x0000241C,
+			.buffer_height_cfg        = 0x00002420,
+			.packer_cfg               = 0x00002424,
+			.stride                   = 0x00002428,
+			.irq_subsample_period     = 0x00002448,
+			.irq_subsample_pattern    = 0x0000244C,
+			.framedrop_period         = 0x00002450,
+			.framedrop_pattern        = 0x00002454,
+			.frame_inc                = 0x00002458,
+			.burst_limit              = 0x0000245C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 3 */
+		{
+			.status0                  = 0x00002500,
+			.status1                  = 0x00002504,
+			.cfg                      = 0x00002508,
+			.header_addr              = 0x0000250C,
+			.header_cfg               = 0x00002510,
+			.image_addr               = 0x00002514,
+			.image_addr_offset        = 0x00002518,
+			.buffer_width_cfg         = 0x0000251C,
+			.buffer_height_cfg        = 0x00002520,
+			.packer_cfg               = 0x00002524,
+			.stride                   = 0x00002528,
+			.irq_subsample_period     = 0x00002548,
+			.irq_subsample_pattern    = 0x0000254C,
+			.framedrop_period         = 0x00002550,
+			.framedrop_pattern        = 0x00002554,
+			.frame_inc                = 0x00002558,
+			.burst_limit              = 0x0000255C,
+			.ubwc_regs                = &vfe175_ubwc_regs_client_3,
+		},
+		/* BUS Client 4 */
+		{
+			.status0                  = 0x00002600,
+			.status1                  = 0x00002604,
+			.cfg                      = 0x00002608,
+			.header_addr              = 0x0000260C,
+			.header_cfg               = 0x00002610,
+			.image_addr               = 0x00002614,
+			.image_addr_offset        = 0x00002618,
+			.buffer_width_cfg         = 0x0000261C,
+			.buffer_height_cfg        = 0x00002620,
+			.packer_cfg               = 0x00002624,
+			.stride                   = 0x00002628,
+			.irq_subsample_period     = 0x00002648,
+			.irq_subsample_pattern    = 0x0000264C,
+			.framedrop_period         = 0x00002650,
+			.framedrop_pattern        = 0x00002654,
+			.frame_inc                = 0x00002658,
+			.burst_limit              = 0x0000265C,
+			.ubwc_regs                = &vfe175_ubwc_regs_client_4,
+		},
+		/* BUS Client 5 */
+		{
+			.status0                  = 0x00002700,
+			.status1                  = 0x00002704,
+			.cfg                      = 0x00002708,
+			.header_addr              = 0x0000270C,
+			.header_cfg               = 0x00002710,
+			.image_addr               = 0x00002714,
+			.image_addr_offset        = 0x00002718,
+			.buffer_width_cfg         = 0x0000271C,
+			.buffer_height_cfg        = 0x00002720,
+			.packer_cfg               = 0x00002724,
+			.stride                   = 0x00002728,
+			.irq_subsample_period     = 0x00002748,
+			.irq_subsample_pattern    = 0x0000274C,
+			.framedrop_period         = 0x00002750,
+			.framedrop_pattern        = 0x00002754,
+			.frame_inc                = 0x00002758,
+			.burst_limit              = 0x0000275C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 6 */
+		{
+			.status0                  = 0x00002800,
+			.status1                  = 0x00002804,
+			.cfg                      = 0x00002808,
+			.header_addr              = 0x0000280C,
+			.header_cfg               = 0x00002810,
+			.image_addr               = 0x00002814,
+			.image_addr_offset        = 0x00002818,
+			.buffer_width_cfg         = 0x0000281C,
+			.buffer_height_cfg        = 0x00002820,
+			.packer_cfg               = 0x00002824,
+			.stride                   = 0x00002828,
+			.irq_subsample_period     = 0x00002848,
+			.irq_subsample_pattern    = 0x0000284C,
+			.framedrop_period         = 0x00002850,
+			.framedrop_pattern        = 0x00002854,
+			.frame_inc                = 0x00002858,
+			.burst_limit              = 0x0000285C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 7 */
+		{
+			.status0                  = 0x00002900,
+			.status1                  = 0x00002904,
+			.cfg                      = 0x00002908,
+			.header_addr              = 0x0000290C,
+			.header_cfg               = 0x00002910,
+			.image_addr               = 0x00002914,
+			.image_addr_offset        = 0x00002918,
+			.buffer_width_cfg         = 0x0000291C,
+			.buffer_height_cfg        = 0x00002920,
+			.packer_cfg               = 0x00002924,
+			.stride                   = 0x00002928,
+			.irq_subsample_period     = 0x00002948,
+			.irq_subsample_pattern    = 0x0000294C,
+			.framedrop_period         = 0x00002950,
+			.framedrop_pattern        = 0x00002954,
+			.frame_inc                = 0x00002958,
+			.burst_limit              = 0x0000295C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 8 */
+		{
+			.status0                  = 0x00002A00,
+			.status1                  = 0x00002A04,
+			.cfg                      = 0x00002A08,
+			.header_addr              = 0x00002A0C,
+			.header_cfg               = 0x00002A10,
+			.image_addr               = 0x00002A14,
+			.image_addr_offset        = 0x00002A18,
+			.buffer_width_cfg         = 0x00002A1C,
+			.buffer_height_cfg        = 0x00002A20,
+			.packer_cfg               = 0x00002A24,
+			.stride                   = 0x00002A28,
+			.irq_subsample_period     = 0x00002A48,
+			.irq_subsample_pattern    = 0x00002A4C,
+			.framedrop_period         = 0x00002A50,
+			.framedrop_pattern        = 0x00002A54,
+			.frame_inc                = 0x00002A58,
+			.burst_limit              = 0x00002A5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 9 */
+		{
+			.status0                  = 0x00002B00,
+			.status1                  = 0x00002B04,
+			.cfg                      = 0x00002B08,
+			.header_addr              = 0x00002B0C,
+			.header_cfg               = 0x00002B10,
+			.image_addr               = 0x00002B14,
+			.image_addr_offset        = 0x00002B18,
+			.buffer_width_cfg         = 0x00002B1C,
+			.buffer_height_cfg        = 0x00002B20,
+			.packer_cfg               = 0x00002B24,
+			.stride                   = 0x00002B28,
+			.irq_subsample_period     = 0x00002B48,
+			.irq_subsample_pattern    = 0x00002B4C,
+			.framedrop_period         = 0x00002B50,
+			.framedrop_pattern        = 0x00002B54,
+			.frame_inc                = 0x00002B58,
+			.burst_limit              = 0x00002B5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 10 */
+		{
+			.status0                  = 0x00002C00,
+			.status1                  = 0x00002C04,
+			.cfg                      = 0x00002C08,
+			.header_addr              = 0x00002C0C,
+			.header_cfg               = 0x00002C10,
+			.image_addr               = 0x00002C14,
+			.image_addr_offset        = 0x00002C18,
+			.buffer_width_cfg         = 0x00002C1C,
+			.buffer_height_cfg        = 0x00002C20,
+			.packer_cfg               = 0x00002C24,
+			.stride                   = 0x00002C28,
+			.irq_subsample_period     = 0x00002C48,
+			.irq_subsample_pattern    = 0x00002C4C,
+			.framedrop_period         = 0x00002C50,
+			.framedrop_pattern        = 0x00002C54,
+			.frame_inc                = 0x00002C58,
+			.burst_limit              = 0x00002C5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 11 */
+		{
+			.status0                  = 0x00002D00,
+			.status1                  = 0x00002D04,
+			.cfg                      = 0x00002D08,
+			.header_addr              = 0x00002D0C,
+			.header_cfg               = 0x00002D10,
+			.image_addr               = 0x00002D14,
+			.image_addr_offset        = 0x00002D18,
+			.buffer_width_cfg         = 0x00002D1C,
+			.buffer_height_cfg        = 0x00002D20,
+			.packer_cfg               = 0x00002D24,
+			.stride                   = 0x00002D28,
+			.irq_subsample_period     = 0x00002D48,
+			.irq_subsample_pattern    = 0x00002D4C,
+			.framedrop_period         = 0x00002D50,
+			.framedrop_pattern        = 0x00002D54,
+			.frame_inc                = 0x00002D58,
+			.burst_limit              = 0x00002D5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 12 */
+		{
+			.status0                  = 0x00002E00,
+			.status1                  = 0x00002E04,
+			.cfg                      = 0x00002E08,
+			.header_addr              = 0x00002E0C,
+			.header_cfg               = 0x00002E10,
+			.image_addr               = 0x00002E14,
+			.image_addr_offset        = 0x00002E18,
+			.buffer_width_cfg         = 0x00002E1C,
+			.buffer_height_cfg        = 0x00002E20,
+			.packer_cfg               = 0x00002E24,
+			.stride                   = 0x00002E28,
+			.irq_subsample_period     = 0x00002E48,
+			.irq_subsample_pattern    = 0x00002E4C,
+			.framedrop_period         = 0x00002E50,
+			.framedrop_pattern        = 0x00002E54,
+			.frame_inc                = 0x00002E58,
+			.burst_limit              = 0x00002E5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 13 */
+		{
+			.status0                  = 0x00002F00,
+			.status1                  = 0x00002F04,
+			.cfg                      = 0x00002F08,
+			.header_addr              = 0x00002F0C,
+			.header_cfg               = 0x00002F10,
+			.image_addr               = 0x00002F14,
+			.image_addr_offset        = 0x00002F18,
+			.buffer_width_cfg         = 0x00002F1C,
+			.buffer_height_cfg        = 0x00002F20,
+			.packer_cfg               = 0x00002F24,
+			.stride                   = 0x00002F28,
+			.irq_subsample_period     = 0x00002F48,
+			.irq_subsample_pattern    = 0x00002F4C,
+			.framedrop_period         = 0x00002F50,
+			.framedrop_pattern        = 0x00002F54,
+			.frame_inc                = 0x00002F58,
+			.burst_limit              = 0x00002F5C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 14 */
+		{
+			.status0                  = 0x00003000,
+			.status1                  = 0x00003004,
+			.cfg                      = 0x00003008,
+			.header_addr              = 0x0000300C,
+			.header_cfg               = 0x00003010,
+			.image_addr               = 0x00003014,
+			.image_addr_offset        = 0x00003018,
+			.buffer_width_cfg         = 0x0000301C,
+			.buffer_height_cfg        = 0x00003020,
+			.packer_cfg               = 0x00003024,
+			.stride                   = 0x00003028,
+			.irq_subsample_period     = 0x00003048,
+			.irq_subsample_pattern    = 0x0000304C,
+			.framedrop_period         = 0x00003050,
+			.framedrop_pattern        = 0x00003054,
+			.frame_inc                = 0x00003058,
+			.burst_limit              = 0x0000305C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 15 */
+		{
+			.status0                  = 0x00003100,
+			.status1                  = 0x00003104,
+			.cfg                      = 0x00003108,
+			.header_addr              = 0x0000310C,
+			.header_cfg               = 0x00003110,
+			.image_addr               = 0x00003114,
+			.image_addr_offset        = 0x00003118,
+			.buffer_width_cfg         = 0x0000311C,
+			.buffer_height_cfg        = 0x00003120,
+			.packer_cfg               = 0x00003124,
+			.stride                   = 0x00003128,
+			.irq_subsample_period     = 0x00003148,
+			.irq_subsample_pattern    = 0x0000314C,
+			.framedrop_period         = 0x00003150,
+			.framedrop_pattern        = 0x00003154,
+			.frame_inc                = 0x00003158,
+			.burst_limit              = 0x0000315C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 16 */
+		{
+			.status0                  = 0x00003200,
+			.status1                  = 0x00003204,
+			.cfg                      = 0x00003208,
+			.header_addr              = 0x0000320C,
+			.header_cfg               = 0x00003210,
+			.image_addr               = 0x00003214,
+			.image_addr_offset        = 0x00003218,
+			.buffer_width_cfg         = 0x0000321C,
+			.buffer_height_cfg        = 0x00003220,
+			.packer_cfg               = 0x00003224,
+			.stride                   = 0x00003228,
+			.irq_subsample_period     = 0x00003248,
+			.irq_subsample_pattern    = 0x0000324C,
+			.framedrop_period         = 0x00003250,
+			.framedrop_pattern        = 0x00003254,
+			.frame_inc                = 0x00003258,
+			.burst_limit              = 0x0000325C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 17 */
+		{
+			.status0                  = 0x00003300,
+			.status1                  = 0x00003304,
+			.cfg                      = 0x00003308,
+			.header_addr              = 0x0000330C,
+			.header_cfg               = 0x00003310,
+			.image_addr               = 0x00003314,
+			.image_addr_offset        = 0x00003318,
+			.buffer_width_cfg         = 0x0000331C,
+			.buffer_height_cfg        = 0x00003320,
+			.packer_cfg               = 0x00003324,
+			.stride                   = 0x00003328,
+			.irq_subsample_period     = 0x00003348,
+			.irq_subsample_pattern    = 0x0000334C,
+			.framedrop_period         = 0x00003350,
+			.framedrop_pattern        = 0x00003354,
+			.frame_inc                = 0x00003358,
+			.burst_limit              = 0x0000335C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 18 */
+		{
+			.status0                  = 0x00003400,
+			.status1                  = 0x00003404,
+			.cfg                      = 0x00003408,
+			.header_addr              = 0x0000340C,
+			.header_cfg               = 0x00003410,
+			.image_addr               = 0x00003414,
+			.image_addr_offset        = 0x00003418,
+			.buffer_width_cfg         = 0x0000341C,
+			.buffer_height_cfg        = 0x00003420,
+			.packer_cfg               = 0x00003424,
+			.stride                   = 0x00003428,
+			.irq_subsample_period     = 0x00003448,
+			.irq_subsample_pattern    = 0x0000344C,
+			.framedrop_period         = 0x00003450,
+			.framedrop_pattern        = 0x00003454,
+			.frame_inc                = 0x00003458,
+			.burst_limit              = 0x0000345C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 19 */
+		{
+			.status0                  = 0x00003500,
+			.status1                  = 0x00003504,
+			.cfg                      = 0x00003508,
+			.header_addr              = 0x0000350C,
+			.header_cfg               = 0x00003510,
+			.image_addr               = 0x00003514,
+			.image_addr_offset        = 0x00003518,
+			.buffer_width_cfg         = 0x0000351C,
+			.buffer_height_cfg        = 0x00003520,
+			.packer_cfg               = 0x00003524,
+			.stride                   = 0x00003528,
+			.irq_subsample_period     = 0x00003548,
+			.irq_subsample_pattern    = 0x0000354C,
+			.framedrop_period         = 0x00003550,
+			.framedrop_pattern        = 0x00003554,
+			.frame_inc                = 0x00003558,
+			.burst_limit              = 0x0000355C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 20 */
+		{
+			.status0                  = 0x00003600,
+			.status1                  = 0x00003604,
+			.cfg                      = 0x00003608,
+			.header_addr              = 0x0000360C,
+			.header_cfg               = 0x00003610,
+			.image_addr               = 0x00003614,
+			.image_addr_offset        = 0x00003618,
+			.buffer_width_cfg         = 0x0000361C,
+			.buffer_height_cfg        = 0x00003620,
+			.packer_cfg               = 0x00003624,
+			.stride                   = 0x00003628,
+			.irq_subsample_period     = 0x00003648,
+			.irq_subsample_pattern    = 0x0000364C,
+			.framedrop_period         = 0x00003650,
+			.framedrop_pattern        = 0x00003654,
+			.frame_inc                = 0x00003658,
+			.burst_limit              = 0x0000365C,
+			.ubwc_regs                = &vfe175_ubwc_regs_client_20,
+		},
+		/* BUS Client 21 */
+		{
+			.status0                  = 0x00003700,
+			.status1                  = 0x00003704,
+			.cfg                      = 0x00003708,
+			.header_addr              = 0x0000370C,
+			.header_cfg               = 0x00003710,
+			.image_addr               = 0x00003714,
+			.image_addr_offset        = 0x00003718,
+			.buffer_width_cfg         = 0x0000371C,
+			.buffer_height_cfg        = 0x00003720,
+			.packer_cfg               = 0x00003724,
+			.stride                   = 0x00003728,
+			.irq_subsample_period     = 0x00003748,
+			.irq_subsample_pattern    = 0x0000374C,
+			.framedrop_period         = 0x00003750,
+			.framedrop_pattern        = 0x00003754,
+			.frame_inc                = 0x00003758,
+			.burst_limit              = 0x0000375C,
+			.ubwc_regs                = &vfe175_ubwc_regs_client_21,
+		},
+		/* BUS Client 22 */
+		{
+			.status0                  = 0x00003800,
+			.status1                  = 0x00003804,
+			.cfg                      = 0x00003808,
+			.header_addr              = 0x0000380C,
+			.header_cfg               = 0x00003810,
+			.image_addr               = 0x00003814,
+			.image_addr_offset        = 0x00003818,
+			.buffer_width_cfg         = 0x0000381C,
+			.buffer_height_cfg        = 0x00003820,
+			.packer_cfg               = 0x00003824,
+			.stride                   = 0x00003828,
+			.irq_subsample_period     = 0x00003848,
+			.irq_subsample_pattern    = 0x0000384C,
+			.framedrop_period         = 0x00003850,
+			.framedrop_pattern        = 0x00003854,
+			.frame_inc                = 0x00003858,
+			.burst_limit              = 0x0000385C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 23 */
+		{
+			.status0                  = 0x00003900,
+			.status1                  = 0x00003904,
+			.cfg                      = 0x00003908,
+			.header_addr              = 0x0000390C,
+			.header_cfg               = 0x00003910,
+			.image_addr               = 0x00003914,
+			.image_addr_offset        = 0x00003918,
+			.buffer_width_cfg         = 0x0000391C,
+			.buffer_height_cfg        = 0x00003920,
+			.packer_cfg               = 0x00003924,
+			.stride                   = 0x00003928,
+			.irq_subsample_period     = 0x00003948,
+			.irq_subsample_pattern    = 0x0000394C,
+			.framedrop_period         = 0x00003950,
+			.framedrop_pattern        = 0x00003954,
+			.frame_inc                = 0x00003958,
+			.burst_limit              = 0x0000395C,
+			.ubwc_regs                = NULL,
+		},
+	},
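+	/*
+	 * Composite-group "done" masks. Groups 0-5 cover single-VFE
+	 * composites; the DUAL_* groups below also carry an addr_sync_mask,
+	 * which pairs with the dual-master registers (addr_sync_cfg,
+	 * dual_master_comp_cfg) apparently used when two VFE cores share an
+	 * output (cf. the dual-master handling in cam_vfe_bus_ver2.c).
+	 */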
+	.comp_grp_reg = {
+		/* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+		{
+			.comp_mask                    = 0x00002010,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+		{
+			.comp_mask                    = 0x00002014,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+		{
+			.comp_mask                    = 0x00002018,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+		{
+			.comp_mask                    = 0x0000201C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+		{
+			.comp_mask                    = 0x00002020,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+		{
+			.comp_mask                    = 0x00002024,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+		{
+			.comp_mask                    = 0x0000202C,
+			.addr_sync_mask               = 0x00002088,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+		{
+			.comp_mask                    = 0x00002030,
+			.addr_sync_mask               = 0x0000208C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+		{
+			.comp_mask                    = 0x00002034,
+			.addr_sync_mask               = 0x00002090,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+		{
+			.comp_mask                    = 0x00002038,
+			.addr_sync_mask               = 0x00002094,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+		{
+			.comp_mask                    = 0x0000203C,
+			.addr_sync_mask               = 0x00002098,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+		{
+			.comp_mask                    = 0x00002040,
+			.addr_sync_mask               = 0x0000209C,
+		},
+	},
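+	/*
+	 * In the table below a max_width/max_height of -1 appears to mean
+	 * "no fixed limit" (RDI, raw-dump and stats paths); the pixel
+	 * outputs carry real size caps.
+	 */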
+	.num_out = 22,
+	.vfe_out_hw_info = {
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+			.max_width     = 4096,
+			.max_height    = 4096,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_FD,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP,
+			.max_width     = 4096,
+			.max_height    = 4096,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_2PD,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+	},
+};
+
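+/*
+ * Top-level VFE175 description; cam_vfe17x.c hands this to the common VFE
+ * driver through the of_device_id .data pointer for "qcom,vfe175".
+ */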
+struct cam_vfe_hw_info cam_vfe175_hw_info = {
+	.irq_reg_info                  = &vfe175_top_irq_reg_info,
+
+	.bus_version                   = CAM_VFE_BUS_VER_2_0,
+	.bus_hw_info                   = &vfe175_bus_hw_info,
+
+	.top_version                   = CAM_VFE_TOP_VER_2_0,
+	.top_hw_info                   = &vfe175_top_hw_info,
+
+	.camif_version                 = CAM_VFE_CAMIF_VER_2_0,
+	.camif_reg                     = &vfe175_camif_reg,
+
+	.camif_lite_version            = CAM_VFE_CAMIF_LITE_VER_2_0,
+	.camif_lite_reg                = &vfe175_camif_lite_reg,
+};
+
+#endif /* _CAM_VFE175_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c
new file mode 100644
index 0000000..be44dad
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include "cam_vfe170.h"
+#include "cam_vfe175.h"
+#include "cam_vfe_lite17x.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_core.h"
+#include "cam_vfe_dev.h"
+
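+/*
+ * Compatible-to-register-table mapping. Both lite variants share the same
+ * cam_vfe_lite17x_hw_info, while each full VFE carries its own table.
+ */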
+static const struct of_device_id cam_vfe_dt_match[] = {
+	{
+		.compatible = "qcom,vfe170",
+		.data = &cam_vfe170_hw_info,
+	},
+	{
+		.compatible = "qcom,vfe175",
+		.data = &cam_vfe175_hw_info,
+	},
+	{
+		.compatible = "qcom,vfe-lite170",
+		.data = &cam_vfe_lite17x_hw_info,
+	},
+	{
+		.compatible = "qcom,vfe-lite175",
+		.data = &cam_vfe_lite17x_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_vfe_dt_match);
+
+static struct platform_driver cam_vfe_driver = {
+	.probe = cam_vfe_probe,
+	.remove = cam_vfe_remove,
+	.driver = {
+		.name = "cam_vfe17x",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_vfe_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_vfe_init_module(void)
+{
+	return platform_driver_register(&cam_vfe_driver);
+}
+
+static void __exit cam_vfe_exit_module(void)
+{
+	platform_driver_unregister(&cam_vfe_driver);
+}
+
+module_init(cam_vfe_init_module);
+module_exit(cam_vfe_exit_module);
+MODULE_DESCRIPTION("CAM VFE17X driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h
new file mode 100644
index 0000000..88b1ff6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite17x.h
@@ -0,0 +1,329 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_LITE17X_H_
+#define _CAM_VFE_LITE17X_H_
+
+#include "cam_vfe_bus_ver2.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_core.h"
+
+static struct cam_irq_register_set vfe17x_top_irq_reg_set[2] = {
+	{
+		.mask_reg_offset   = 0x0000005C,
+		.clear_reg_offset  = 0x00000064,
+		.status_reg_offset = 0x0000006C,
+	},
+	{
+		.mask_reg_offset   = 0x00000060,
+		.clear_reg_offset  = 0x00000068,
+		.status_reg_offset = 0x00000070,
+	},
+};
+
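+/*
+ * Two top-level IRQ banks; per the mask/clear/status naming above, clears
+ * are committed by writing global_clear_bitmask to global_clear_offset
+ * (sequencing handled by cam_irq_controller).
+ */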
+static struct cam_irq_controller_reg_info vfe17x_top_irq_reg_info = {
+	.num_registers = 2,
+	.irq_reg_set = vfe17x_top_irq_reg_set,
+	.global_clear_offset  = 0x00000058,
+	.global_clear_bitmask = 0x00000001,
+};
+
+static struct cam_vfe_top_ver2_reg_offset_common vfe17x_top_common_reg = {
+	.hw_version               = 0x00000000,
+	.hw_capability            = 0x00000004,
+	.lens_feature             = 0x00000008,
+	.stats_feature            = 0x0000000C,
+	.color_feature            = 0x00000010,
+	.zoom_feature             = 0x00000014,
+	.global_reset_cmd         = 0x00000018,
+	.module_ctrl              = {
+		NULL,
+		NULL,
+		NULL,
+		NULL,
+	},
+	.bus_cgc_ovd              = 0x0000003C,
+	.core_cfg                 = 0x00000000,
+	.three_D_cfg              = 0x00000000,
+	.violation_status         = 0x0000007C,
+	.reg_update_cmd           = 0x000004AC,
+};
+
+static struct cam_vfe_rdi_ver2_reg vfe17x_rdi_reg = {
+	.reg_update_cmd           = 0x000004AC,
+};
+
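+/*
+ * Per-RDI data, decoded from the masks below: reg_update_cmd_data uses
+ * bits 1-4 (0x2/0x4/0x8/0x10), SOF IRQs land on status bits 27-30 and
+ * reg-update IRQs on bits 5-8; one bit per RDI path.
+ */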
+static struct cam_vfe_rdi_reg_data  vfe17x_rdi_0_data = {
+	.reg_update_cmd_data      = 0x2,
+	.sof_irq_mask             = 0x8000000,
+	.reg_update_irq_mask      = 0x20,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe17x_rdi_1_data = {
+	.reg_update_cmd_data      = 0x4,
+	.sof_irq_mask             = 0x10000000,
+	.reg_update_irq_mask      = 0x40,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe17x_rdi_2_data = {
+	.reg_update_cmd_data      = 0x8,
+	.sof_irq_mask             = 0x20000000,
+	.reg_update_irq_mask      = 0x80,
+};
+
+static struct cam_vfe_rdi_reg_data  vfe17x_rdi_3_data = {
+	.reg_update_cmd_data      = 0x10,
+	.sof_irq_mask             = 0x40000000,
+	.reg_update_irq_mask      = 0x100,
+};
+
+static struct cam_vfe_top_ver2_hw_info vfe17x_top_hw_info = {
+	.common_reg = &vfe17x_top_common_reg,
+	.camif_hw_info = {
+		.common_reg = NULL,
+		.camif_reg  = NULL,
+		.reg_data   = NULL,
+		},
+	.rdi_hw_info = {
+		.common_reg = &vfe17x_top_common_reg,
+		.rdi_reg    = &vfe17x_rdi_reg,
+		.reg_data = {
+			&vfe17x_rdi_0_data,
+			&vfe17x_rdi_1_data,
+			&vfe17x_rdi_2_data,
+			&vfe17x_rdi_3_data,
+			},
+		},
+	.mux_type = {
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+		CAM_VFE_RDI_VER_1_0,
+	},
+};
+
+static struct cam_irq_register_set vfe17x_bus_irq_reg[3] = {
+		{
+			.mask_reg_offset   = 0x00002044,
+			.clear_reg_offset  = 0x00002050,
+			.status_reg_offset = 0x0000205C,
+		},
+		{
+			.mask_reg_offset   = 0x00002048,
+			.clear_reg_offset  = 0x00002054,
+			.status_reg_offset = 0x00002060,
+		},
+		{
+			.mask_reg_offset   = 0x0000204C,
+			.clear_reg_offset  = 0x00002058,
+			.status_reg_offset = 0x00002064,
+		},
+};
+
+static struct cam_vfe_bus_ver2_hw_info vfe17x_bus_hw_info = {
+	.common_reg = {
+		.hw_version                   = 0x00002000,
+		.hw_capability                = 0x00002004,
+		.sw_reset                     = 0x00002008,
+		.cgc_ovd                      = 0x0000200C,
+		.pwr_iso_cfg                  = 0x000020CC,
+		.dual_master_comp_cfg         = 0x00002028,
+		.irq_reg_info = {
+			.num_registers        = 3,
+			.irq_reg_set          = vfe17x_bus_irq_reg,
+			.global_clear_offset  = 0x00002068,
+			.global_clear_bitmask = 0x00000001,
+		},
+		.comp_error_status            = 0x0000206C,
+		.comp_ovrwr_status            = 0x00002070,
+		.dual_comp_error_status       = 0x00002074,
+		.dual_comp_ovrwr_status       = 0x00002078,
+		.addr_sync_cfg                = 0x0000207C,
+		.addr_sync_frame_hdr          = 0x00002080,
+		.addr_sync_no_sync            = 0x00002084,
+	},
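+	/* VFE-lite exposes only the four RDI write masters (clients 0-3). */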
+	.num_client = 4,
+	.bus_client_reg = {
+		/* BUS Client 0 */
+		{
+			.status0                  = 0x00002200,
+			.status1                  = 0x00002204,
+			.cfg                      = 0x00002208,
+			.header_addr              = 0x0000220C,
+			.header_cfg               = 0x00002210,
+			.image_addr               = 0x00002214,
+			.image_addr_offset        = 0x00002218,
+			.buffer_width_cfg         = 0x0000221C,
+			.buffer_height_cfg        = 0x00002220,
+			.packer_cfg               = 0x00002224,
+			.stride                   = 0x00002228,
+			.irq_subsample_period     = 0x00002248,
+			.irq_subsample_pattern    = 0x0000224C,
+			.framedrop_period         = 0x00002250,
+			.framedrop_pattern        = 0x00002254,
+			.frame_inc                = 0x00002258,
+			.burst_limit              = 0x0000225C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 1 */
+		{
+			.status0                  = 0x00002300,
+			.status1                  = 0x00002304,
+			.cfg                      = 0x00002308,
+			.header_addr              = 0x0000230C,
+			.header_cfg               = 0x00002310,
+			.image_addr               = 0x00002314,
+			.image_addr_offset        = 0x00002318,
+			.buffer_width_cfg         = 0x0000231C,
+			.buffer_height_cfg        = 0x00002320,
+			.packer_cfg               = 0x00002324,
+			.stride                   = 0x00002328,
+			.irq_subsample_period     = 0x00002348,
+			.irq_subsample_pattern    = 0x0000234C,
+			.framedrop_period         = 0x00002350,
+			.framedrop_pattern        = 0x00002354,
+			.frame_inc                = 0x00002358,
+			.burst_limit              = 0x0000235C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 2 */
+		{
+			.status0                  = 0x00002400,
+			.status1                  = 0x00002404,
+			.cfg                      = 0x00002408,
+			.header_addr              = 0x0000240C,
+			.header_cfg               = 0x00002410,
+			.image_addr               = 0x00002414,
+			.image_addr_offset        = 0x00002418,
+			.buffer_width_cfg         = 0x0000241C,
+			.buffer_height_cfg        = 0x00002420,
+			.packer_cfg               = 0x00002424,
+			.stride                   = 0x00002428,
+			.irq_subsample_period     = 0x00002448,
+			.irq_subsample_pattern    = 0x0000244C,
+			.framedrop_period         = 0x00002450,
+			.framedrop_pattern        = 0x00002454,
+			.frame_inc                = 0x00002458,
+			.burst_limit              = 0x0000245C,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 3 */
+		{
+			.status0                  = 0x00002500,
+			.status1                  = 0x00002504,
+			.cfg                      = 0x00002508,
+			.header_addr              = 0x0000250C,
+			.header_cfg               = 0x00002510,
+			.image_addr               = 0x00002514,
+			.image_addr_offset        = 0x00002518,
+			.buffer_width_cfg         = 0x0000251C,
+			.buffer_height_cfg        = 0x00002520,
+			.packer_cfg               = 0x00002524,
+			.stride                   = 0x00002528,
+			.irq_subsample_period     = 0x00002548,
+			.irq_subsample_pattern    = 0x0000254C,
+			.framedrop_period         = 0x00002550,
+			.framedrop_pattern        = 0x00002554,
+			.frame_inc                = 0x00002558,
+			.burst_limit              = 0x0000255C,
+			.ubwc_regs                = NULL,
+		},
+	},
+	.comp_grp_reg = {
+		/* CAM_VFE_BUS_VER2_COMP_GRP_0 */
+		{
+			.comp_mask                    = 0x00002010,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_1 */
+		{
+			.comp_mask                    = 0x00002014,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_2 */
+		{
+			.comp_mask                    = 0x00002018,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_3 */
+		{
+			.comp_mask                    = 0x0000201C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_4 */
+		{
+			.comp_mask                    = 0x00002020,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_5 */
+		{
+			.comp_mask                    = 0x00002024,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 */
+		{
+			.comp_mask                    = 0x0000202C,
+			.addr_sync_mask               = 0x00002088,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1 */
+		{
+			.comp_mask                    = 0x00002030,
+			.addr_sync_mask               = 0x0000208C,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2 */
+		{
+			.comp_mask                    = 0x00002034,
+			.addr_sync_mask               = 0x00002090,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3 */
+		{
+			.comp_mask                    = 0x00002038,
+			.addr_sync_mask               = 0x00002094,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4 */
+		{
+			.comp_mask                    = 0x0000203C,
+			.addr_sync_mask               = 0x00002098,
+		},
+		/* CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5 */
+		{
+			.comp_mask                    = 0x00002040,
+			.addr_sync_mask               = 0x0000209C,
+		},
+	},
+	.num_out = 4,
+	.vfe_out_hw_info = {
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER2_VFE_OUT_RDI3,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+	},
+};
+
+static struct cam_vfe_hw_info cam_vfe_lite17x_hw_info = {
+	.irq_reg_info                  = &vfe17x_top_irq_reg_info,
+
+	.bus_version                   = CAM_VFE_BUS_VER_2_0,
+	.bus_hw_info                   = &vfe17x_bus_hw_info,
+
+	.top_version                   = CAM_VFE_TOP_VER_2_0,
+	.top_hw_info                   = &vfe17x_top_hw_info,
+};
+
+#endif /* _CAM_VFE_LITE17X_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
new file mode 100644
index 0000000..00becb0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_bus.o cam_vfe_bus_ver2.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
new file mode 100644
index 0000000..9325831
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_vfe_bus.h"
+#include "cam_vfe_bus_ver1.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_debug_util.h"
+
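+/*
+ * Thin version dispatch for the BUS layer. Only BUS v2 is handled here
+ * (and only cam_vfe_bus_ver2.o is built in the vfe_bus Makefile);
+ * cam_vfe_bus_ver1.h exists but has no init case yet.
+ */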
+int cam_vfe_bus_init(uint32_t          bus_version,
+	struct cam_hw_soc_info        *soc_info,
+	struct cam_hw_intf            *hw_intf,
+	void                          *bus_hw_info,
+	void                          *vfe_irq_controller,
+	struct cam_vfe_bus           **vfe_bus)
+{
+	int rc = -ENODEV;
+
+	switch (bus_version) {
+	case CAM_VFE_BUS_VER_2_0:
+		rc = cam_vfe_bus_ver2_init(soc_info, hw_intf, bus_hw_info,
+			vfe_irq_controller, vfe_bus);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
+		break;
+	}
+
+	return rc;
+}
+
+int cam_vfe_bus_deinit(uint32_t        bus_version,
+	struct cam_vfe_bus           **vfe_bus)
+{
+	int rc = -ENODEV;
+
+	switch (bus_version) {
+	case CAM_VFE_BUS_VER_2_0:
+		rc = cam_vfe_bus_ver2_deinit(vfe_bus);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h
new file mode 100644
index 0000000..38bff4e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver1.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_BUS_VER1_H_
+#define _CAM_VFE_BUS_VER1_H_
+
+enum cam_vfe_bus_ver1_pingpong_id {
+	CAM_VFE_BUS_VER1_PING,
+	CAM_VFE_BUS_VER1_PONG,
+	CAM_VFE_BUS_VER1_PINGPONG_MAX,
+};
+
+enum cam_vfe_bus_ver1_wm_type {
+	CAM_VFE_BUS_WM_TYPE_IMAGE,
+	CAM_VFE_BUS_WM_TYPE_STATS,
+	CAM_VFE_BUS_WM_TYPE_MAX,
+};
+
+enum cam_vfe_bus_ver1_comp_grp_type {
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG0,
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG1,
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG2,
+	CAM_VFE_BUS_VER1_COMP_GRP_IMG3,
+	CAM_VFE_BUS_VER1_COMP_GRP_STATS0,
+	CAM_VFE_BUS_VER1_COMP_GRP_STATS1,
+	CAM_VFE_BUS_VER1_COMP_GRP_MAX,
+};
+
+struct cam_vfe_bus_ver1_common_reg {
+	uint32_t cmd_offset;
+	uint32_t cfg_offset;
+	uint32_t io_fmt_offset;
+	uint32_t argb_cfg_offset;
+	uint32_t xbar_cfg0_offset;
+	uint32_t xbar_cfg1_offset;
+	uint32_t xbar_cfg2_offset;
+	uint32_t xbar_cfg3_offset;
+	uint32_t ping_pong_status_reg;
+};
+
+struct cam_vfe_bus_ver1_wm_reg {
+	uint32_t wm_cfg_offset;
+	uint32_t ping_addr_offset;
+	uint32_t ping_max_addr_offset;
+	uint32_t pong_addr_offset;
+	uint32_t pong_max_addr_offset;
+	uint32_t addr_cfg_offset;
+	uint32_t ub_cfg_offset;
+	uint32_t image_size_offset;
+	uint32_t buffer_cfg_offset;
+	uint32_t framedrop_pattern_offset;
+	uint32_t irq_subsample_pattern_offset;
+	uint32_t ping_pong_status_bit; /* 0 - 31 */
+	uint32_t composite_bit; /* 0 - 31 */
+};
+
+struct cam_vfe_bus_ver1_wm_resource_data {
+	uint32_t             index;
+	uint32_t             wm_type;
+	uint32_t             res_type;
+
+	uint32_t             offset;
+	uint32_t             width;
+	uint32_t             height;
+	uint32_t             stride;
+	uint32_t             scanline;
+
+	uint32_t             burst_len;
+
+	uint32_t             framedrop_period;
+	uint32_t             framedrop_pattern;
+
+	uint32_t             buf_valid[CAM_VFE_BUS_VER1_PINGPONG_MAX];
+	uint32_t             ub_size;
+	uint32_t             ub_offset;
+
+	struct cam_vfe_bus_ver1_wm_reg  hw_regs;
+};
+
+struct cam_vfe_bus_ver1_comp_grp_reg {
+	enum cam_vfe_bus_ver1_comp_grp_type comp_grp_type;
+	uint32_t             comp_grp_offset;
+};
+
+struct cam_vfe_bus_ver1_comp_grp {
+	struct cam_vfe_bus_ver1_comp_grp_reg reg_info;
+	struct list_head     wm_list;
+	uint32_t             cur_bit_mask;
+};
+
+/*
+ * cam_vfe_bus_ver1_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @mem_base:                Mapped base address of register space
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller:      VFE IRQ Controller to use for subscribing to Top
+ *                           level IRQs
+ * @vfe_bus:                 Pointer to vfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ */
+int cam_vfe_bus_ver1_init(
+	void __iomem                         *mem_base,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_VER1_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
new file mode 100644
index 0000000..c745c04
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -0,0 +1,3605 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "cam_hw_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_bus_ver2.h"
+#include "cam_vfe_core.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+
+static const char drv_name[] = "vfe_bus";
+
+#define CAM_VFE_BUS_IRQ_REG0                     0
+#define CAM_VFE_BUS_IRQ_REG1                     1
+#define CAM_VFE_BUS_IRQ_REG2                     2
+#define CAM_VFE_BUS_IRQ_MAX                      3
+
+#define CAM_VFE_BUS_VER2_PAYLOAD_MAX             256
+
+#define CAM_VFE_BUS_SET_DEBUG_REG                0x82
+
+#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH               0xFF01
+#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE              0xFF01
+#define CAM_VFE_BUS_INTRA_CLIENT_MASK               0x3
+#define CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT    8
+#define CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL \
+	((1 << CAM_VFE_BUS_VER2_MAX_CLIENTS) - 1)
+
+#define ALIGNUP(value, alignment) \
+	(((value) + (alignment) - 1) / (alignment) * (alignment))
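+/* Round up to a multiple of 'alignment', e.g. ALIGNUP(30, 16) == 32. */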
+
+#define MAX_BUF_UPDATE_REG_NUM   \
+	((sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client) +  \
+	sizeof(struct cam_vfe_bus_ver2_reg_offset_ubwc_client))/4)
+#define MAX_REG_VAL_PAIR_SIZE    \
+	(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
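+/*
+ * The /4 above turns the struct sizes (register-offset tables of u32s)
+ * into a register count; each register can contribute one (offset, value)
+ * pair per plane, hence the * 2 * CAM_PACKET_MAX_PLANES bound.
+ */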
+
+#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val)    \
+	do {                                               \
+		buf_array[(index)++] = offset;             \
+		buf_array[(index)++] = val;                \
+	} while (0)
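+/*
+ * Appends an (offset, value) pair and advances the index by two, e.g.
+ * (illustrative call, names assumed from the WM update path):
+ *   CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+ *       wm_data->hw_regs->stride, wm_data->stride);
+ */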
+
+static uint32_t bus_error_irq_mask[3] = {
+	0x7800,
+	0x0000,
+	0x0040,
+};
+
+enum cam_vfe_bus_packer_format {
+	PACKER_FMT_PLAIN_128                   = 0x0,
+	PACKER_FMT_PLAIN_8                     = 0x1,
+	PACKER_FMT_PLAIN_16_10BPP              = 0x2,
+	PACKER_FMT_PLAIN_16_12BPP              = 0x3,
+	PACKER_FMT_PLAIN_16_14BPP              = 0x4,
+	PACKER_FMT_PLAIN_16_16BPP              = 0x5,
+	PACKER_FMT_ARGB_10                     = 0x6,
+	PACKER_FMT_ARGB_12                     = 0x7,
+	PACKER_FMT_ARGB_14                     = 0x8,
+	PACKER_FMT_PLAIN_32_20BPP              = 0x9,
+	PACKER_FMT_PLAIN_64                    = 0xA,
+	PACKER_FMT_TP_10                       = 0xB,
+	PACKER_FMT_PLAIN_32_32BPP              = 0xC,
+	PACKER_FMT_PLAIN_8_ODD_EVEN            = 0xD,
+	PACKER_FMT_PLAIN_8_LSB_MSB_10          = 0xE,
+	PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN = 0xF,
+	PACKER_FMT_MAX                         = 0xF,
+};
+
+enum cam_vfe_bus_comp_grp_id {
+	CAM_VFE_BUS_COMP_GROUP_NONE = -EINVAL,
+	CAM_VFE_BUS_COMP_GROUP_ID_0 = 0x0,
+	CAM_VFE_BUS_COMP_GROUP_ID_1 = 0x1,
+	CAM_VFE_BUS_COMP_GROUP_ID_2 = 0x2,
+	CAM_VFE_BUS_COMP_GROUP_ID_3 = 0x3,
+	CAM_VFE_BUS_COMP_GROUP_ID_4 = 0x4,
+	CAM_VFE_BUS_COMP_GROUP_ID_5 = 0x5,
+};
+
+struct cam_vfe_bus_ver2_common_data {
+	uint32_t                                    core_index;
+	void __iomem                               *mem_base;
+	struct cam_hw_intf                         *hw_intf;
+	void                                       *bus_irq_controller;
+	void                                       *vfe_irq_controller;
+	struct cam_vfe_bus_ver2_reg_offset_common  *common_reg;
+	uint32_t                                    io_buf_update[
+		MAX_REG_VAL_PAIR_SIZE];
+
+	struct cam_vfe_bus_irq_evt_payload          evt_payload[
+		CAM_VFE_BUS_VER2_PAYLOAD_MAX];
+	struct list_head                            free_payload_list;
+	spinlock_t                                  spin_lock;
+	struct mutex                                bus_mutex;
+	uint32_t                                    secure_mode;
+	uint32_t                                    num_sec_out;
+	uint32_t                                    addr_no_sync;
+};
+
+struct cam_vfe_bus_ver2_wm_resource_data {
+	uint32_t             index;
+	struct cam_vfe_bus_ver2_common_data            *common_data;
+	struct cam_vfe_bus_ver2_reg_offset_bus_client  *hw_regs;
+	void                                *ctx;
+
+	uint32_t             irq_enabled;
+	bool                 init_cfg_done;
+	bool                 hfr_cfg_done;
+
+	uint32_t             offset;
+	uint32_t             width;
+	uint32_t             height;
+	uint32_t             stride;
+	uint32_t             format;
+	enum cam_vfe_bus_packer_format pack_fmt;
+
+	uint32_t             burst_len;
+
+	uint32_t             en_ubwc;
+	bool                 ubwc_updated;
+	uint32_t             packer_cfg;
+	uint32_t             tile_cfg;
+	uint32_t             h_init;
+	uint32_t             v_init;
+	uint32_t             ubwc_meta_stride;
+	uint32_t             ubwc_mode_cfg_0;
+	uint32_t             ubwc_mode_cfg_1;
+	uint32_t             ubwc_meta_offset;
+
+	uint32_t             irq_subsample_period;
+	uint32_t             irq_subsample_pattern;
+	uint32_t             framedrop_period;
+	uint32_t             framedrop_pattern;
+
+	uint32_t             en_cfg;
+	uint32_t             is_dual;
+};
+
+struct cam_vfe_bus_ver2_comp_grp_data {
+	enum cam_vfe_bus_ver2_comp_grp_type          comp_grp_type;
+	struct cam_vfe_bus_ver2_common_data         *common_data;
+	struct cam_vfe_bus_ver2_reg_offset_comp_grp *hw_regs;
+
+	uint32_t                         irq_enabled;
+	uint32_t                         comp_grp_local_idx;
+	uint32_t                         unique_id;
+
+	uint32_t                         is_master;
+	uint32_t                         dual_slave_core;
+	uint32_t                         intra_client_mask;
+	uint32_t                         composite_mask;
+	uint32_t                         addr_sync_mode;
+
+	uint32_t                         acquire_dev_cnt;
+	uint32_t                         irq_trigger_cnt;
+
+	void                            *ctx;
+};
+
+struct cam_vfe_bus_ver2_vfe_out_data {
+	uint32_t                              out_type;
+	struct cam_vfe_bus_ver2_common_data  *common_data;
+
+	uint32_t                         num_wm;
+	struct cam_isp_resource_node    *wm_res[PLANE_MAX];
+
+	struct cam_isp_resource_node    *comp_grp;
+	enum cam_isp_hw_sync_mode        dual_comp_sync_mode;
+	uint32_t                         dual_hw_alternate_vfe_id;
+	struct list_head                 vfe_out_list;
+
+	uint32_t                         format;
+	uint32_t                         max_width;
+	uint32_t                         max_height;
+	struct cam_cdm_utils_ops        *cdm_util_ops;
+	uint32_t                         secure_mode;
+};
+
+struct cam_vfe_bus_ver2_priv {
+	struct cam_vfe_bus_ver2_common_data common_data;
+	uint32_t                            num_client;
+	uint32_t                            num_out;
+
+	struct cam_isp_resource_node  bus_client[CAM_VFE_BUS_VER2_MAX_CLIENTS];
+	struct cam_isp_resource_node  comp_grp[CAM_VFE_BUS_VER2_COMP_GRP_MAX];
+	struct cam_isp_resource_node  vfe_out[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
+
+	struct list_head                    free_comp_grp;
+	struct list_head                    free_dual_comp_grp;
+	struct list_head                    used_comp_grp;
+
+	uint32_t                            irq_handle;
+	uint32_t                            error_irq_handle;
+	void                               *tasklet_info;
+};
+
+static int cam_vfe_bus_process_cmd(
+	struct cam_isp_resource_node *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+
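+/*
+ * IRQ event payloads come from a fixed pool (CAM_VFE_BUS_VER2_PAYLOAD_MAX
+ * entries on common_data->free_payload_list, guarded by spin_lock); a
+ * handler takes one here and returns it via cam_vfe_bus_put_evt_payload().
+ */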
+static int cam_vfe_bus_get_evt_payload(
+	struct cam_vfe_bus_ver2_common_data  *common_data,
+	struct cam_vfe_bus_irq_evt_payload  **evt_payload)
+{
+	int rc;
+
+	spin_lock(&common_data->spin_lock);
+	if (list_empty(&common_data->free_payload_list)) {
+		*evt_payload = NULL;
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&common_data->free_payload_list,
+		struct cam_vfe_bus_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&common_data->spin_lock);
+	return rc;
+}
+
+static enum cam_vfe_bus_comp_grp_id
+	cam_vfe_bus_comp_grp_id_convert(uint32_t comp_grp)
+{
+	switch (comp_grp) {
+	case CAM_ISP_RES_COMP_GROUP_ID_0:
+		return CAM_VFE_BUS_COMP_GROUP_ID_0;
+	case CAM_ISP_RES_COMP_GROUP_ID_1:
+		return CAM_VFE_BUS_COMP_GROUP_ID_1;
+	case CAM_ISP_RES_COMP_GROUP_ID_2:
+		return CAM_VFE_BUS_COMP_GROUP_ID_2;
+	case CAM_ISP_RES_COMP_GROUP_ID_3:
+		return CAM_VFE_BUS_COMP_GROUP_ID_3;
+	case CAM_ISP_RES_COMP_GROUP_ID_4:
+		return CAM_VFE_BUS_COMP_GROUP_ID_4;
+	case CAM_ISP_RES_COMP_GROUP_ID_5:
+		return CAM_VFE_BUS_COMP_GROUP_ID_5;
+	case CAM_ISP_RES_COMP_GROUP_NONE:
+	default:
+		return CAM_VFE_BUS_COMP_GROUP_NONE;
+	}
+}
+
+static int cam_vfe_bus_put_evt_payload(void     *core_info,
+	struct cam_vfe_bus_irq_evt_payload     **evt_payload)
+{
+	struct cam_vfe_bus_ver2_common_data *common_data = NULL;
+	uint32_t  *ife_irq_regs = NULL;
+	uint32_t   status_reg0, status_reg1, status_reg2;
+	unsigned long flags;
+
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+	(*evt_payload)->error_type = 0;
+	ife_irq_regs = (*evt_payload)->irq_reg_val;
+	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
+	status_reg2 = ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
+
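+	/*
+	 * Bits still set in the recorded status registers mean other
+	 * handlers have yet to consume this payload, so it is deliberately
+	 * not returned to the free list yet (each consumer appears to clear
+	 * its bits before the final put).
+	 */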
+	if (status_reg0 || status_reg1 || status_reg2) {
+		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x status2 0x%x",
+			status_reg0, status_reg1, status_reg2);
+		return 0;
+	}
+
+	common_data = core_info;
+
+	spin_lock_irqsave(&common_data->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&common_data->free_payload_list);
+	spin_unlock_irqrestore(&common_data->spin_lock, flags);
+
+	*evt_payload = NULL;
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
+static int cam_vfe_bus_ver2_get_intra_client_mask(
+	enum cam_vfe_bus_ver2_vfe_core_id  dual_slave_core,
+	enum cam_vfe_bus_ver2_vfe_core_id  current_core,
+	uint32_t                          *intra_client_mask)
+{
+	int rc = 0;
+	uint32_t camera_hw_version = 0;
+	uint32_t version_based_intra_client_mask = 0x1;
+
+	*intra_client_mask = 0;
+
+	if (dual_slave_core == current_core) {
+		CAM_ERR(CAM_ISP,
+			"Invalid params. Same core as Master and Slave");
+		return -EINVAL;
+	}
+
+	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+
+	CAM_DBG(CAM_ISP, "CPAS VERSION %d", camera_hw_version);
+
+	switch (camera_hw_version) {
+	case CAM_CPAS_TITAN_170_V100:
+		version_based_intra_client_mask = 0x3;
+		break;
+	default:
+		version_based_intra_client_mask = 0x1;
+		break;
+	}
+
+	switch (current_core) {
+	case CAM_VFE_BUS_VER2_VFE_CORE_0:
+		switch (dual_slave_core) {
+		case CAM_VFE_BUS_VER2_VFE_CORE_1:
+			*intra_client_mask = version_based_intra_client_mask;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
+				dual_slave_core);
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_CORE_1:
+		switch (dual_slave_core) {
+		case CAM_VFE_BUS_VER2_VFE_CORE_0:
+			*intra_client_mask = version_based_intra_client_mask;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
+				dual_slave_core);
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_ISP,
+			"Invalid value for master core %u", current_core);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
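+/*
+ * Only image outputs (pixel and RDI paths) can be secured; stats and PD
+ * outputs are always non-secure.
+ */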
+static bool cam_vfe_bus_can_be_secure(uint32_t out_type)
+{
+	switch (out_type) {
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP:
+		return true;
+
+	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_2PD:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+	default:
+		return false;
+	}
+}
+
+static enum cam_vfe_bus_ver2_vfe_out_type
+	cam_vfe_bus_get_out_res_id(uint32_t res_type)
+{
+	switch (res_type) {
+	case CAM_ISP_IFE_OUT_RES_FULL:
+		return CAM_VFE_BUS_VER2_VFE_OUT_FULL;
+	case CAM_ISP_IFE_OUT_RES_DS4:
+		return CAM_VFE_BUS_VER2_VFE_OUT_DS4;
+	case CAM_ISP_IFE_OUT_RES_DS16:
+		return CAM_VFE_BUS_VER2_VFE_OUT_DS16;
+	case CAM_ISP_IFE_OUT_RES_FD:
+		return CAM_VFE_BUS_VER2_VFE_OUT_FD;
+	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
+		return CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP;
+	case CAM_ISP_IFE_OUT_RES_PDAF:
+		return CAM_VFE_BUS_VER2_VFE_OUT_PDAF;
+	case CAM_ISP_IFE_OUT_RES_2PD:
+		return CAM_VFE_BUS_VER2_VFE_OUT_2PD;
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+		return CAM_VFE_BUS_VER2_VFE_OUT_RDI0;
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+		return CAM_VFE_BUS_VER2_VFE_OUT_RDI1;
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+		return CAM_VFE_BUS_VER2_VFE_OUT_RDI2;
+	case CAM_ISP_IFE_OUT_RES_RDI_3:
+		return CAM_VFE_BUS_VER2_VFE_OUT_RDI3;
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE;
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST;
+	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG;
+	case CAM_ISP_IFE_OUT_RES_STATS_BF:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF;
+	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG;
+	case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST;
+	case CAM_ISP_IFE_OUT_RES_STATS_RS:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS;
+	case CAM_ISP_IFE_OUT_RES_STATS_CS:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS;
+	case CAM_ISP_IFE_OUT_RES_STATS_IHIST:
+		return CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST;
+	case CAM_ISP_IFE_OUT_RES_FULL_DISP:
+		return CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP;
+	case CAM_ISP_IFE_OUT_RES_DS4_DISP:
+		return CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP;
+	case CAM_ISP_IFE_OUT_RES_DS16_DISP:
+		return CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP;
+	default:
+		return CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+	}
+}
+
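+/*
+ * Write masters needed per (out_type, format) pair: two-plane YUV formats
+ * on FULL/FULL_DISP/FD take two WMs (Y + C); everything else takes one.
+ */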
+static int cam_vfe_bus_get_num_wm(
+	enum cam_vfe_bus_ver2_vfe_out_type    res_type,
+	uint32_t                              format)
+{
+	switch (res_type) {
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI3:
+		switch (format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_MIPI_RAW_14:
+		case CAM_FORMAT_MIPI_RAW_16:
+		case CAM_FORMAT_MIPI_RAW_20:
+		case CAM_FORMAT_DPCM_10_6_10:
+		case CAM_FORMAT_DPCM_10_8_10:
+		case CAM_FORMAT_DPCM_12_6_12:
+		case CAM_FORMAT_DPCM_12_8_12:
+		case CAM_FORMAT_DPCM_14_8_14:
+		case CAM_FORMAT_DPCM_14_10_14:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN32_20:
+		case CAM_FORMAT_PLAIN128:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP:
+		switch (format) {
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_TP10:
+		case CAM_FORMAT_UBWC_NV12:
+		case CAM_FORMAT_UBWC_NV12_4R:
+		case CAM_FORMAT_UBWC_TP10:
+		case CAM_FORMAT_UBWC_P010:
+		case CAM_FORMAT_PLAIN16_10:
+			return 2;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+		switch (format) {
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_TP10:
+		case CAM_FORMAT_PLAIN16_10:
+			return 2;
+		case CAM_FORMAT_Y_ONLY:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP:
+		switch (format) {
+		case CAM_FORMAT_PD8:
+		case CAM_FORMAT_PD10:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+		switch (format) {
+		case CAM_FORMAT_ARGB_14:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+		switch (format) {
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_2PD:
+		switch (format) {
+		case CAM_FORMAT_PLAIN16_8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN64:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+		switch (format) {
+		case CAM_FORMAT_PLAIN64:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+		switch (format) {
+		case CAM_FORMAT_PLAIN16_16:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	CAM_ERR(CAM_ISP, "Unsupported format %u for resource_type %u",
+		format, res_type);
+
+	return -EINVAL;
+}
+
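+/*
+ * Map each (VFE out resource, plane) pair to its fixed write master
+ * (WM) index. The table below implies this layout: WM 0-3 = RDI0-3,
+ * WM 3-4 = FULL Y/C, WM 5/6 = DS4/DS16, WM 7-8 = FD Y/C, WM 9 =
+ * RAW_DUMP, WM 10 = PDAF/2PD, WM 11-19 = stats, WM 20-21 = FULL_DISP
+ * Y/C, WM 22/23 = DS4_DISP/DS16_DISP. Note that WM 3 is shared by
+ * RDI3 and the FULL Y plane, so those two outputs presumably cannot
+ * be active at the same time.
+ */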
+static int cam_vfe_bus_get_wm_idx(
+	enum cam_vfe_bus_ver2_vfe_out_type vfe_out_res_id,
+	enum cam_vfe_bus_plane_type plane)
+{
+	int wm_idx = -1;
+
+	switch (vfe_out_res_id) {
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI0:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI1:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI2:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RDI3:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 3;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 3;
+			break;
+		case PLANE_C:
+			wm_idx = 4;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 5;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 6;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FD:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 7;
+			break;
+		case PLANE_C:
+			wm_idx = 8;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 9;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_PDAF:
+	case CAM_VFE_BUS_VER2_VFE_OUT_2PD:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 10;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 11;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 12;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 13;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 14;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 15;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 16;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 17;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 18;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 19;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 20;
+			break;
+		case PLANE_C:
+			wm_idx = 21;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 22;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 23;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return wm_idx;
+}
+
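+/*
+ * Select the bus packer format for an output pixel format. NV21 only
+ * differs from NV12 in chroma ordering, so the handful of write
+ * masters checked below (4, 6, 21) use the ODD_EVEN packer to swap
+ * Cb/Cr; every other client falls through to the common NV12 packing.
+ */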
+static enum cam_vfe_bus_packer_format
+	cam_vfe_bus_get_packer_fmt(uint32_t out_fmt, int wm_index)
+{
+	switch (out_fmt) {
+	case CAM_FORMAT_NV21:
+		if ((wm_index == 4) || (wm_index == 6) || (wm_index == 21))
+			return PACKER_FMT_PLAIN_8_LSB_MSB_10_ODD_EVEN;
+		/* else fall through to the common NV12 packing */
+	case CAM_FORMAT_NV12:
+	case CAM_FORMAT_UBWC_NV12:
+	case CAM_FORMAT_UBWC_NV12_4R:
+	case CAM_FORMAT_Y_ONLY:
+		return PACKER_FMT_PLAIN_8_LSB_MSB_10;
+	case CAM_FORMAT_PLAIN16_16:
+		return PACKER_FMT_PLAIN_16_16BPP;
+	case CAM_FORMAT_PLAIN64:
+		return PACKER_FMT_PLAIN_64;
+	case CAM_FORMAT_PLAIN8:
+		return PACKER_FMT_PLAIN_8;
+	case CAM_FORMAT_PLAIN16_10:
+		return PACKER_FMT_PLAIN_16_10BPP;
+	case CAM_FORMAT_PLAIN16_12:
+		return PACKER_FMT_PLAIN_16_12BPP;
+	case CAM_FORMAT_PLAIN16_14:
+		return PACKER_FMT_PLAIN_16_14BPP;
+	case CAM_FORMAT_PLAIN32_20:
+		return PACKER_FMT_PLAIN_32_20BPP;
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_PLAIN16_8:
+	case CAM_FORMAT_PLAIN128:
+	case CAM_FORMAT_PD8:
+	case CAM_FORMAT_PD10:
+		return PACKER_FMT_PLAIN_128;
+	case CAM_FORMAT_UBWC_TP10:
+	case CAM_FORMAT_TP10:
+		return PACKER_FMT_TP_10;
+	case CAM_FORMAT_ARGB_14:
+		return PACKER_FMT_ARGB_14;
+	default:
+		return PACKER_FMT_MAX;
+	}
+}
+
+static int cam_vfe_bus_acquire_wm(
+	struct cam_vfe_bus_ver2_priv          *ver2_bus_priv,
+	struct cam_isp_out_port_info          *out_port_info,
+	void                                  *tasklet,
+	void                                  *ctx,
+	enum cam_vfe_bus_ver2_vfe_out_type     vfe_out_res_id,
+	enum cam_vfe_bus_plane_type            plane,
+	uint32_t                               subscribe_irq,
+	struct cam_isp_resource_node         **wm_res,
+	uint32_t                              *client_done_mask,
+	uint32_t                               is_dual)
+{
+	int wm_idx = 0;
+	struct cam_isp_resource_node              *wm_res_local = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data  *rsrc_data = NULL;
+
+	*wm_res = NULL;
+	*client_done_mask = 0;
+
+	/* No need to allocate for BUS VER2. VFE OUT to WM is fixed. */
+	wm_idx = cam_vfe_bus_get_wm_idx(vfe_out_res_id, plane);
+	if (wm_idx < 0 || wm_idx >= ver2_bus_priv->num_client) {
+		CAM_ERR(CAM_ISP, "Unsupported VFE out %d plane %d",
+			vfe_out_res_id, plane);
+		return -EINVAL;
+	}
+
+	wm_res_local = &ver2_bus_priv->bus_client[wm_idx];
+	if (wm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "WM res not available state:%d",
+			wm_res_local->res_state);
+		return -EALREADY;
+	}
+	wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	wm_res_local->tasklet_info = tasklet;
+
+	rsrc_data = wm_res_local->res_priv;
+	rsrc_data->irq_enabled = subscribe_irq;
+	rsrc_data->ctx = ctx;
+	rsrc_data->format = out_port_info->format;
+	rsrc_data->pack_fmt = cam_vfe_bus_get_packer_fmt(rsrc_data->format,
+		wm_idx);
+
+	rsrc_data->width = out_port_info->width;
+	rsrc_data->height = out_port_info->height;
+	rsrc_data->is_dual = is_dual;
+	/* Set WM offset value to default */
+	rsrc_data->offset  = 0;
+	CAM_DBG(CAM_ISP, "WM %d width %d height %d", rsrc_data->index,
+		rsrc_data->width, rsrc_data->height);
+
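+	/*
+	 * Per-WM defaults keyed off the fixed WM index (see
+	 * cam_vfe_bus_get_wm_idx()). In en_cfg, bit 0 enables the
+	 * client; 0x3 appears to additionally select frame-based
+	 * (rather than line-based) output, which the stats and PDAF
+	 * clients pair with width/height 0 and stride 1.
+	 */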
+	if (rsrc_data->index < 3) {
+		/* Write masters 0-2 map to RDI0/RDI1/RDI2 */
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_MIPI_RAW_6:
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_MIPI_RAW_14:
+		case CAM_FORMAT_MIPI_RAW_16:
+		case CAM_FORMAT_MIPI_RAW_20:
+		case CAM_FORMAT_PLAIN128:
+			rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+			rsrc_data->height = 0;
+			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
+			rsrc_data->pack_fmt = 0x0;
+			rsrc_data->en_cfg = 0x3;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			rsrc_data->en_cfg = 0x1;
+			rsrc_data->pack_fmt = 0x1;
+			rsrc_data->width = rsrc_data->width * 2;
+			rsrc_data->stride = rsrc_data->width;
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN32_20:
+			rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+			rsrc_data->height = 0;
+			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
+			rsrc_data->pack_fmt = 0x0;
+			rsrc_data->en_cfg = 0x3;
+			break;
+		case CAM_FORMAT_PLAIN64:
+			rsrc_data->en_cfg = 0x1;
+			rsrc_data->pack_fmt = 0xA;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Unsupported RDI format %d",
+				rsrc_data->format);
+			return -EINVAL;
+		}
+	} else if ((rsrc_data->index < 5) ||
+		(rsrc_data->index == 7) || (rsrc_data->index == 8) ||
+		(rsrc_data->index == 20) || (rsrc_data->index == 21)) {
+		/*
+		 * Write masters 3-4: FULL OUT, 7-8: FD OUT,
+		 * 20-21: FULL_DISP
+		 */
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_UBWC_NV12_4R:
+			rsrc_data->en_ubwc = 1;
+			rsrc_data->width = ALIGNUP(rsrc_data->width, 64);
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_UBWC_NV12:
+			rsrc_data->en_ubwc = 1;
+			/* Fall through for NV12 */
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_Y_ONLY:
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_UBWC_TP10:
+			rsrc_data->en_ubwc = 1;
+			rsrc_data->width =
+				ALIGNUP(rsrc_data->width, 48) * 4 / 3;
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_TP10:
+			rsrc_data->width =
+				ALIGNUP(rsrc_data->width, 3) * 4 / 3;
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			rsrc_data->width *= 2;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid format %d",
+				rsrc_data->format);
+			return -EINVAL;
+		}
+		rsrc_data->en_cfg = 0x1;
+	} else if (rsrc_data->index >= 11 && rsrc_data->index < 20) {
+		/* Write master 11 - 19 stats */
+		rsrc_data->width = 0;
+		rsrc_data->height = 0;
+		rsrc_data->stride = 1;
+		rsrc_data->en_cfg = 0x3;
+	} else if (rsrc_data->index == 10) {
+		/* Write master 10 - PDAF/2PD */
+		rsrc_data->width = 0;
+		rsrc_data->height = 0;
+		rsrc_data->stride = 1;
+		rsrc_data->en_cfg = 0x3;
+		if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_PDAF)
+			/* LSB aligned */
+			rsrc_data->pack_fmt |= 0x10;
+	} else if (rsrc_data->index == 9) {
+		/* Write master 9 - Raw dump */
+		rsrc_data->width = rsrc_data->width * 2;
+		rsrc_data->stride = rsrc_data->width;
+		rsrc_data->en_cfg = 0x1;
+		/* LSB aligned */
+		rsrc_data->pack_fmt |= 0x10;
+	} else {
+		/* Write master 5-6 DS ports */
+		uint32_t align_width;
+
+		rsrc_data->width = rsrc_data->width * 4;
+		rsrc_data->height = rsrc_data->height / 2;
+		rsrc_data->en_cfg = 0x1;
+		CAM_DBG(CAM_ISP, "before width %d", rsrc_data->width);
+		align_width = ALIGNUP(rsrc_data->width, 16);
+		if (align_width != rsrc_data->width) {
+			CAM_WARN(CAM_ISP,
+				"Override width %u with expected %u",
+				rsrc_data->width, align_width);
+			rsrc_data->width = align_width;
+		}
+	}
+
+	*client_done_mask = (1 << wm_idx);
+	*wm_res = wm_res_local;
+
+	CAM_DBG(CAM_ISP, "WM %d: processed width %d, processed height %d",
+		rsrc_data->index, rsrc_data->width, rsrc_data->height);
+	return 0;
+}
+
+static int cam_vfe_bus_release_wm(void   *bus_priv,
+	struct cam_isp_resource_node     *wm_res)
+{
+	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+
+	rsrc_data->irq_enabled = 0;
+	rsrc_data->offset = 0;
+	rsrc_data->width = 0;
+	rsrc_data->height = 0;
+	rsrc_data->stride = 0;
+	rsrc_data->format = 0;
+	rsrc_data->pack_fmt = 0;
+	rsrc_data->burst_len = 0;
+	rsrc_data->irq_subsample_period = 0;
+	rsrc_data->irq_subsample_pattern = 0;
+	rsrc_data->framedrop_period = 0;
+	rsrc_data->framedrop_pattern = 0;
+	rsrc_data->packer_cfg = 0;
+	rsrc_data->en_ubwc = 0;
+	rsrc_data->tile_cfg = 0;
+	rsrc_data->h_init = 0;
+	rsrc_data->v_init = 0;
+	rsrc_data->ubwc_meta_stride = 0;
+	rsrc_data->ubwc_mode_cfg_0 = 0;
+	rsrc_data->ubwc_mode_cfg_1 = 0;
+	rsrc_data->ubwc_meta_offset = 0;
+	rsrc_data->init_cfg_done = false;
+	rsrc_data->hfr_cfg_done = false;
+	rsrc_data->en_cfg = 0;
+	rsrc_data->is_dual = 0;
+
+	wm_res->tasklet_info = NULL;
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+static int cam_vfe_bus_start_wm(struct cam_isp_resource_node *wm_res)
+{
+	int rc = 0, val = 0;
+	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+	struct cam_vfe_bus_ver2_common_data        *common_data =
+		rsrc_data->common_data;
+	uint32_t                   bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
+	uint32_t camera_hw_version;
+
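+	/*
+	 * 0xf appears to program the maximum burst length for this
+	 * client; it matches the "burst len" reported by the debug
+	 * log at the end of this function.
+	 */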
+	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
+
+	cam_io_w_mb(rsrc_data->width,
+		common_data->mem_base + rsrc_data->hw_regs->buffer_width_cfg);
+	cam_io_w(rsrc_data->height,
+		common_data->mem_base + rsrc_data->hw_regs->buffer_height_cfg);
+	cam_io_w(rsrc_data->pack_fmt,
+		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
+
+	/* Configure stride for RDIs */
+	if (rsrc_data->index < 3)
+		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+			rsrc_data->hw_regs->stride));
+
+	/* Subscribe IRQ */
+	if (rsrc_data->irq_enabled) {
+		CAM_DBG(CAM_ISP, "Subscribe WM%d IRQ", rsrc_data->index);
+		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG1] =
+			(1 << rsrc_data->index);
+		wm_res->irq_handle = cam_irq_controller_subscribe_irq(
+			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+			bus_irq_reg_mask, wm_res,
+			wm_res->top_half_handler,
+			cam_ife_mgr_do_tasklet_buf_done,
+			wm_res->tasklet_info, &tasklet_bh_api);
+		if (wm_res->irq_handle < 0) {
+			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for WM %d",
+				rsrc_data->index);
+			return -EFAULT;
+		}
+	}
+
+	/* Enable UBWC if needed */
+	if (rsrc_data->en_ubwc) {
+		rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+		if (rc) {
+			CAM_ERR(CAM_ISP, "Failed to get HW version, rc: %d",
+				rc);
+			return rc;
+		}
+		if ((camera_hw_version > CAM_CPAS_TITAN_NONE) &&
+			(camera_hw_version < CAM_CPAS_TITAN_175_V100)) {
+			struct cam_vfe_bus_ver2_reg_offset_ubwc_client
+				*ubwc_regs;
+
+			ubwc_regs =
+				(struct
+				cam_vfe_bus_ver2_reg_offset_ubwc_client *)
+				rsrc_data->hw_regs->ubwc_regs;
+			val = cam_io_r_mb(common_data->mem_base +
+				ubwc_regs->mode_cfg_0);
+			val |= 0x1;
+			cam_io_w_mb(val, common_data->mem_base +
+				ubwc_regs->mode_cfg_0);
+		} else if ((camera_hw_version == CAM_CPAS_TITAN_175_V100) ||
+			(camera_hw_version == CAM_CPAS_TITAN_175_V101)) {
+			struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client
+				*ubwc_regs;
+
+			ubwc_regs =
+				(struct
+				cam_vfe_bus_ver2_reg_offset_ubwc_3_client *)
+				rsrc_data->hw_regs->ubwc_regs;
+			val = cam_io_r_mb(common_data->mem_base +
+				ubwc_regs->mode_cfg_0);
+			val |= 0x1;
+			cam_io_w_mb(val, common_data->mem_base +
+				ubwc_regs->mode_cfg_0);
+		} else {
+			CAM_ERR(CAM_ISP, "Invalid HW version: %d",
+				camera_hw_version);
+			return -EINVAL;
+		}
+	}
+
+	/* Enable WM */
+	cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
+		rsrc_data->hw_regs->cfg);
+
+	CAM_DBG(CAM_ISP, "WM res %d width = %d, height = %d", rsrc_data->index,
+		rsrc_data->width, rsrc_data->height);
+	CAM_DBG(CAM_ISP, "WM res %d pk_fmt = %d", rsrc_data->index,
+		rsrc_data->pack_fmt & PACKER_FMT_MAX);
+	CAM_DBG(CAM_ISP, "WM res %d stride = %d, burst len = %d",
+		rsrc_data->index, rsrc_data->stride, 0xf);
+	CAM_DBG(CAM_ISP, "enable WM res %d offset 0x%x val 0x%x",
+		rsrc_data->index, (uint32_t) rsrc_data->hw_regs->cfg,
+		rsrc_data->en_cfg);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return rc;
+}
+
+static int cam_vfe_bus_stop_wm(struct cam_isp_resource_node *wm_res)
+{
+	int rc = 0;
+	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+	struct cam_vfe_bus_ver2_common_data        *common_data =
+		rsrc_data->common_data;
+
+	/* Disable WM */
+	cam_io_w_mb(0x0,
+		common_data->mem_base + rsrc_data->hw_regs->cfg);
+
+	/* Disable all register access, rely on global reset */
+	CAM_DBG(CAM_ISP, "WM res %d irq_enabled %d",
+		rsrc_data->index, rsrc_data->irq_enabled);
+	/* Unsubscribe IRQ */
+	if (rsrc_data->irq_enabled)
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			wm_res->irq_handle);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	rsrc_data->init_cfg_done = false;
+	rsrc_data->hfr_cfg_done = false;
+
+	return rc;
+}
+
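+/*
+ * Buf-done handling is split top/bottom half: the top half (IRQ
+ * context) only fetches a free event payload, timestamps it and copies
+ * the raw status registers; the bottom half runs from the tasklet and
+ * interprets the status bits.
+ */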
+static int cam_vfe_bus_handle_wm_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_isp_resource_node               *wm_res = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data   *rsrc_data = NULL;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	wm_res = th_payload->handler_priv;
+	if (!wm_res) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Error: No resource");
+		return -ENODEV;
+	}
+
+	rsrc_data = wm_res->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->ctx = rsrc_data->ctx;
+	evt_payload->core_index = rsrc_data->common_data->core_index;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+static int cam_vfe_bus_handle_wm_done_bottom_half(void *wm_node,
+	void *evt_payload_priv)
+{
+	int rc = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node          *wm_res = wm_node;
+	struct cam_vfe_bus_irq_evt_payload    *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data =
+		(wm_res == NULL) ? NULL : wm_res->res_priv;
+	uint32_t  *cam_ife_irq_regs;
+	uint32_t   status_reg;
+
+	if (!evt_payload || !rsrc_data)
+		return rc;
+
+	cam_ife_irq_regs = evt_payload->irq_reg_val;
+	status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1];
+
+	if (status_reg & BIT(rsrc_data->index)) {
+		cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS1] &=
+			~BIT(rsrc_data->index);
+		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+	CAM_DBG(CAM_ISP, "status_reg %x rc %d wm_idx %d",
+		status_reg, rc, rsrc_data->index);
+
+	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
+		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
+			&evt_payload);
+
+	return rc;
+}
+
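+/*
+ * Decode debug_status_0 after a bus violation. Each bit appears to
+ * correspond to one bus client, in the same order as the WM index
+ * mapping above (bit 0 = RDI0 ... bit 23 = DISP 16:1), which is why
+ * the checks below walk consecutive bit positions.
+ */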
+static int cam_vfe_bus_err_bottom_half(void *ctx_priv,
+	void *evt_payload_priv)
+{
+	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+	struct cam_vfe_bus_ver2_common_data *common_data;
+	uint32_t val = 0;
+
+	if (!ctx_priv || !evt_payload_priv)
+		return -EINVAL;
+
+	evt_payload = evt_payload_priv;
+	common_data = evt_payload->ctx;
+
+	val = evt_payload->debug_status_0;
+	CAM_ERR(CAM_ISP, "Bus Violation: debug_status_0 = 0x%x", val);
+
+	if (val & 0x01)
+		CAM_INFO(CAM_ISP, "RDI 0 violation");
+
+	if (val & 0x02)
+		CAM_INFO(CAM_ISP, "RDI 1 violation");
+
+	if (val & 0x04)
+		CAM_INFO(CAM_ISP, "RDI 2 violation");
+
+	if (val & 0x08)
+		CAM_INFO(CAM_ISP, "VID Y 1:1 UBWC violation");
+
+	if (val & 0x010)
+		CAM_INFO(CAM_ISP, "VID C 1:1 UBWC violation");
+
+	if (val & 0x020)
+		CAM_INFO(CAM_ISP, "VID YC 4:1 violation");
+
+	if (val & 0x040)
+		CAM_INFO(CAM_ISP, "VID YC 16:1 violation");
+
+	if (val & 0x080)
+		CAM_INFO(CAM_ISP, "FD Y violation");
+
+	if (val & 0x0100)
+		CAM_INFO(CAM_ISP, "FD C violation");
+
+	if (val & 0x0200)
+		CAM_INFO(CAM_ISP, "RAW DUMP violation");
+
+	if (val & 0x0400)
+		CAM_INFO(CAM_ISP, "PDAF violation");
+
+	if (val & 0x0800)
+		CAM_INFO(CAM_ISP, "STATs HDR BE violation");
+
+	if (val & 0x01000)
+		CAM_INFO(CAM_ISP, "STATs HDR BHIST violation");
+
+	if (val & 0x02000)
+		CAM_INFO(CAM_ISP, "STATs TINTLESS BG violation");
+
+	if (val & 0x04000)
+		CAM_INFO(CAM_ISP, "STATs BF violation");
+
+	if (val & 0x08000)
+		CAM_INFO(CAM_ISP, "STATs AWB BG UBWC violation");
+
+	if (val & 0x010000)
+		CAM_INFO(CAM_ISP, "STATs BHIST violation");
+
+	if (val & 0x020000)
+		CAM_INFO(CAM_ISP, "STATs RS violation");
+
+	if (val & 0x040000)
+		CAM_INFO(CAM_ISP, "STATs CS violation");
+
+	if (val & 0x080000)
+		CAM_INFO(CAM_ISP, "STATs IHIST violation");
+
+	if (val & 0x0100000)
+		CAM_INFO(CAM_ISP, "DISP Y 1:1 UBWC violation");
+
+	if (val & 0x0200000)
+		CAM_INFO(CAM_ISP, "DISP C 1:1 UBWC violation");
+
+	if (val & 0x0400000)
+		CAM_INFO(CAM_ISP, "DISP YC 4:1 violation");
+
+	if (val & 0x0800000)
+		CAM_INFO(CAM_ISP, "DISP YC 16:1 violation");
+
+	cam_vfe_bus_put_evt_payload(common_data, &evt_payload);
+	return 0;
+}
+
+static int cam_vfe_bus_init_wm_resource(uint32_t index,
+	struct cam_vfe_bus_ver2_priv    *ver2_bus_priv,
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
+	struct cam_isp_resource_node    *wm_res)
+{
+	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_wm_resource_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for WM res priv");
+		return -ENOMEM;
+	}
+	wm_res->res_priv = rsrc_data;
+
+	rsrc_data->index = index;
+	rsrc_data->hw_regs = &ver2_hw_info->bus_client_reg[index];
+	rsrc_data->common_data = &ver2_bus_priv->common_data;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&wm_res->list);
+
+	wm_res->start = cam_vfe_bus_start_wm;
+	wm_res->stop = cam_vfe_bus_stop_wm;
+	wm_res->top_half_handler = cam_vfe_bus_handle_wm_done_top_half;
+	wm_res->bottom_half_handler = cam_vfe_bus_handle_wm_done_bottom_half;
+	wm_res->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_vfe_bus_deinit_wm_resource(
+	struct cam_isp_resource_node    *wm_res)
+{
+	struct cam_vfe_bus_ver2_wm_resource_data *rsrc_data;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	INIT_LIST_HEAD(&wm_res->list);
+
+	wm_res->start = NULL;
+	wm_res->stop = NULL;
+	wm_res->top_half_handler = NULL;
+	wm_res->bottom_half_handler = NULL;
+	wm_res->hw_intf = NULL;
+
+	rsrc_data = wm_res->res_priv;
+	wm_res->res_priv = NULL;
+	if (!rsrc_data)
+		return -ENOMEM;
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+static void cam_vfe_bus_add_wm_to_comp_grp(
+	struct cam_isp_resource_node    *comp_grp,
+	uint32_t                         composite_mask)
+{
+	struct cam_vfe_bus_ver2_comp_grp_data  *rsrc_data = comp_grp->res_priv;
+
+	rsrc_data->composite_mask |= composite_mask;
+}
+
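+/*
+ * Composite groups may be shared by several output ports: a group is
+ * matched on its (comp_grp_local_idx, unique_id) pair, so a second
+ * acquire with the same pair reuses the already-reserved group instead
+ * of pulling a fresh one from the free list.
+ */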
+static void cam_vfe_bus_match_comp_grp(
+	struct cam_vfe_bus_ver2_priv  *ver2_bus_priv,
+	struct cam_isp_resource_node **comp_grp,
+	uint32_t                       comp_grp_local_idx,
+	uint32_t                       unique_id)
+{
+	struct cam_vfe_bus_ver2_comp_grp_data  *rsrc_data = NULL;
+	struct cam_isp_resource_node           *comp_grp_local = NULL;
+
+	list_for_each_entry(comp_grp_local,
+		&ver2_bus_priv->used_comp_grp, list) {
+		rsrc_data = comp_grp_local->res_priv;
+		if (rsrc_data->comp_grp_local_idx == comp_grp_local_idx &&
+			rsrc_data->unique_id == unique_id) {
+			/* Match found */
+			*comp_grp = comp_grp_local;
+			return;
+		}
+	}
+
+	*comp_grp = NULL;
+}
+
+static int cam_vfe_bus_acquire_comp_grp(
+	struct cam_vfe_bus_ver2_priv        *ver2_bus_priv,
+	struct cam_isp_out_port_info        *out_port_info,
+	void                                *tasklet,
+	void                                *ctx,
+	uint32_t                             unique_id,
+	uint32_t                             is_dual,
+	uint32_t                             is_master,
+	enum cam_vfe_bus_ver2_vfe_core_id    dual_slave_core,
+	struct cam_isp_resource_node       **comp_grp)
+{
+	int rc = 0;
+	uint32_t bus_comp_grp_id;
+	struct cam_isp_resource_node           *comp_grp_local = NULL;
+	struct cam_vfe_bus_ver2_comp_grp_data  *rsrc_data = NULL;
+
+	bus_comp_grp_id = cam_vfe_bus_comp_grp_id_convert(
+		out_port_info->comp_grp_id);
+	/* Perform match only if there is valid comp grp request */
+	if (out_port_info->comp_grp_id != CAM_ISP_RES_COMP_GROUP_NONE) {
+		/* Check if matching comp_grp already acquired */
+		cam_vfe_bus_match_comp_grp(ver2_bus_priv, &comp_grp_local,
+			bus_comp_grp_id, unique_id);
+	}
+
+	if (!comp_grp_local) {
+		/* First find a free group */
+		if (is_dual) {
+			CAM_DBG(CAM_ISP, "Acquire dual comp group");
+			if (list_empty(&ver2_bus_priv->free_dual_comp_grp)) {
+				CAM_ERR(CAM_ISP, "No Free Composite Group");
+				return -ENODEV;
+			}
+			comp_grp_local = list_first_entry(
+				&ver2_bus_priv->free_dual_comp_grp,
+				struct cam_isp_resource_node, list);
+			rsrc_data = comp_grp_local->res_priv;
+			rc = cam_vfe_bus_ver2_get_intra_client_mask(
+				dual_slave_core,
+				comp_grp_local->hw_intf->hw_idx,
+				&rsrc_data->intra_client_mask);
+			if (rc)
+				return rc;
+		} else {
+			CAM_DBG(CAM_ISP, "Acquire comp group");
+			if (list_empty(&ver2_bus_priv->free_comp_grp)) {
+				CAM_ERR(CAM_ISP, "No Free Composite Group");
+				return -ENODEV;
+			}
+			comp_grp_local = list_first_entry(
+				&ver2_bus_priv->free_comp_grp,
+				struct cam_isp_resource_node, list);
+			rsrc_data = comp_grp_local->res_priv;
+		}
+
+		list_del(&comp_grp_local->list);
+		comp_grp_local->tasklet_info = tasklet;
+		comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+		rsrc_data->is_master = is_master;
+		rsrc_data->composite_mask = 0;
+		rsrc_data->unique_id = unique_id;
+		rsrc_data->comp_grp_local_idx = bus_comp_grp_id;
+
+		if (is_master)
+			rsrc_data->addr_sync_mode = 0;
+		else
+			rsrc_data->addr_sync_mode = 1;
+
+		list_add_tail(&comp_grp_local->list,
+			&ver2_bus_priv->used_comp_grp);
+
+	} else {
+		rsrc_data = comp_grp_local->res_priv;
+		/* Do not support runtime change in composite mask */
+		if (comp_grp_local->res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING) {
+			CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u",
+				comp_grp_local->res_state,
+				rsrc_data->comp_grp_type);
+			return -EBUSY;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Comp Grp type %u", rsrc_data->comp_grp_type);
+
+	rsrc_data->ctx = ctx;
+	rsrc_data->acquire_dev_cnt++;
+	*comp_grp = comp_grp_local;
+
+	return rc;
+}
+
+static int cam_vfe_bus_release_comp_grp(
+	struct cam_vfe_bus_ver2_priv         *ver2_bus_priv,
+	struct cam_isp_resource_node         *in_comp_grp)
+{
+	struct cam_isp_resource_node           *comp_grp = NULL;
+	struct cam_vfe_bus_ver2_comp_grp_data  *in_rsrc_data = NULL;
+	int match_found = 0;
+
+	if (!in_comp_grp) {
+		CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", in_comp_grp);
+		return -EINVAL;
+	}
+
+	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "Already released Comp Grp");
+		return 0;
+	}
+
+	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CAM_ERR(CAM_ISP, "Invalid State %d",
+			in_comp_grp->res_state);
+		return -EBUSY;
+	}
+
+	in_rsrc_data = in_comp_grp->res_priv;
+	CAM_DBG(CAM_ISP, "Comp Grp type %u", in_rsrc_data->comp_grp_type);
+
+	list_for_each_entry(comp_grp, &ver2_bus_priv->used_comp_grp, list) {
+		if (comp_grp == in_comp_grp) {
+			match_found = 1;
+			break;
+		}
+	}
+
+	if (!match_found) {
+		CAM_ERR(CAM_ISP, "Could not find matching Comp Grp type %u",
+			in_rsrc_data->comp_grp_type);
+		return -ENODEV;
+	}
+
+	in_rsrc_data->acquire_dev_cnt--;
+	if (in_rsrc_data->acquire_dev_cnt == 0) {
+		list_del(&comp_grp->list);
+
+		in_rsrc_data->unique_id = 0;
+		in_rsrc_data->comp_grp_local_idx = CAM_VFE_BUS_COMP_GROUP_NONE;
+		in_rsrc_data->composite_mask = 0;
+		in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+		in_rsrc_data->addr_sync_mode = 0;
+
+		comp_grp->tasklet_info = NULL;
+		comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+		if (in_rsrc_data->comp_grp_type >=
+			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+			in_rsrc_data->comp_grp_type <=
+			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
+			list_add_tail(&comp_grp->list,
+				&ver2_bus_priv->free_dual_comp_grp);
+		else if (in_rsrc_data->comp_grp_type >=
+			CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+			in_rsrc_data->comp_grp_type <=
+			CAM_VFE_BUS_VER2_COMP_GRP_5)
+			list_add_tail(&comp_grp->list,
+				&ver2_bus_priv->free_comp_grp);
+	}
+
+	return 0;
+}
+
+static int cam_vfe_bus_start_comp_grp(struct cam_isp_resource_node *comp_grp)
+{
+	int rc = 0;
+	uint32_t addr_sync_cfg;
+	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
+		comp_grp->res_priv;
+	struct cam_vfe_bus_ver2_common_data        *common_data =
+		rsrc_data->common_data;
+	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_IRQ_MAX] = {0};
+
+	CAM_DBG(CAM_ISP, "comp group id:%d streaming state:%d",
+		rsrc_data->comp_grp_type, comp_grp->res_state);
+
+	cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
+		rsrc_data->hw_regs->comp_mask);
+	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		return 0;
+
+	CAM_DBG(CAM_ISP, "composite_mask is 0x%x", rsrc_data->composite_mask);
+	CAM_DBG(CAM_ISP, "composite_mask addr 0x%x",
+		rsrc_data->hw_regs->comp_mask);
+
+	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) {
+		int dual_comp_grp = (rsrc_data->comp_grp_type -
+			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
+
+		if (rsrc_data->is_master) {
+			int intra_client_en = cam_io_r_mb(
+				common_data->mem_base +
+				common_data->common_reg->dual_master_comp_cfg);
+
+			/*
+			 * 2 Bits per comp_grp. Hence left shift by
+			 * comp_grp * 2
+			 */
+			intra_client_en |=
+				(rsrc_data->intra_client_mask <<
+					(dual_comp_grp * 2));
+
+			cam_io_w_mb(intra_client_en, common_data->mem_base +
+				common_data->common_reg->dual_master_comp_cfg);
+
+			bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG2] =
+				(1 << dual_comp_grp);
+		}
+
+		CAM_DBG(CAM_ISP, "addr_sync_mask addr 0x%x",
+			rsrc_data->hw_regs->addr_sync_mask);
+		cam_io_w_mb(rsrc_data->composite_mask, common_data->mem_base +
+			rsrc_data->hw_regs->addr_sync_mask);
+
+		addr_sync_cfg = cam_io_r_mb(common_data->mem_base +
+			common_data->common_reg->addr_sync_cfg);
+		addr_sync_cfg |= (rsrc_data->addr_sync_mode << dual_comp_grp);
+		/*
+		 * 2 bits per dual_comp_grp; the intra-client field starts
+		 * at bit 8 (CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT), so
+		 * shift left by (dual_comp_grp * 2) plus 8, e.g.
+		 * dual_comp_grp 2 lands in bits 12-13.
+		 */
+		addr_sync_cfg |=
+			(rsrc_data->intra_client_mask <<
+				((dual_comp_grp * 2) +
+				CAM_VFE_BUS_ADDR_SYNC_INTRA_CLIENT_SHIFT));
+		cam_io_w_mb(addr_sync_cfg, common_data->mem_base +
+			common_data->common_reg->addr_sync_cfg);
+
+		common_data->addr_no_sync &= ~(rsrc_data->composite_mask);
+		cam_io_w_mb(common_data->addr_no_sync, common_data->mem_base +
+			common_data->common_reg->addr_sync_no_sync);
+		CAM_DBG(CAM_ISP, "addr_sync_cfg: 0x%x addr_no_sync_cfg: 0x%x",
+			addr_sync_cfg, common_data->addr_no_sync);
+	} else {
+		/* IRQ bits for COMP GRP start at 5. So add 5 to the shift */
+		bus_irq_reg_mask[CAM_VFE_BUS_IRQ_REG0] =
+			(1 << (rsrc_data->comp_grp_type + 5));
+	}
+
+	/*
+	 * For Dual composite subscribe IRQ only for master
+	 * For regular composite, subscribe IRQ always
+	 */
+	CAM_DBG(CAM_ISP, "Subscribe COMP_GRP%d IRQ", rsrc_data->comp_grp_type);
+	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
+		(rsrc_data->is_master)) ||
+		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
+		comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
+			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+			bus_irq_reg_mask, comp_grp,
+			comp_grp->top_half_handler,
+			cam_ife_mgr_do_tasklet_buf_done,
+			comp_grp->tasklet_info, &tasklet_bh_api);
+		if (comp_grp->irq_handle < 0) {
+			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
+				rsrc_data->comp_grp_type);
+			return -EFAULT;
+		}
+	}
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return rc;
+}
+
+static int cam_vfe_bus_stop_comp_grp(struct cam_isp_resource_node *comp_grp)
+{
+	int rc = 0;
+	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data =
+		comp_grp->res_priv;
+	struct cam_vfe_bus_ver2_common_data        *common_data =
+		rsrc_data->common_data;
+
+	/* Unsubscribe IRQ */
+	if (((rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5) &&
+		(rsrc_data->is_master)) ||
+		(rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			comp_grp->irq_handle);
+	}
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return rc;
+}
+
+static int cam_vfe_bus_handle_comp_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_isp_resource_node               *comp_grp = NULL;
+	struct cam_vfe_bus_ver2_comp_grp_data      *rsrc_data = NULL;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	comp_grp = th_payload->handler_priv;
+	if (!comp_grp) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+		return -ENODEV;
+	}
+
+	rsrc_data = comp_grp->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+	CAM_DBG(CAM_ISP, "IRQ status_2 = 0x%x", th_payload->evt_status_arr[2]);
+
+	rc  = cam_vfe_bus_get_evt_payload(rsrc_data->common_data, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x status_2 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1],
+			th_payload->evt_status_arr[2]);
+
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->ctx = rsrc_data->ctx;
+	evt_payload->core_index = rsrc_data->common_data->core_index;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
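+/*
+ * For a shared composite group the same buf-done event is handled once
+ * per acquirer; irq_trigger_cnt counts those passes and the status bit
+ * is cleared only after every acquirer (acquire_dev_cnt) has seen it,
+ * so no consumer misses the event.
+ */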
+static int cam_vfe_bus_handle_comp_done_bottom_half(
+	void                *handler_priv,
+	void                *evt_payload_priv)
+{
+	int rc = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node          *comp_grp = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload    *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = comp_grp->res_priv;
+	uint32_t                              *cam_ife_irq_regs;
+	uint32_t                               status_reg;
+	uint32_t                               comp_err_reg;
+	uint32_t                               comp_grp_id;
+
+	CAM_DBG(CAM_ISP, "comp grp type %d", rsrc_data->comp_grp_type);
+
+	if (!evt_payload)
+		return rc;
+
+	cam_ife_irq_regs = evt_payload->irq_reg_val;
+
+	switch (rsrc_data->comp_grp_type) {
+	case CAM_VFE_BUS_VER2_COMP_GRP_0:
+	case CAM_VFE_BUS_VER2_COMP_GRP_1:
+	case CAM_VFE_BUS_VER2_COMP_GRP_2:
+	case CAM_VFE_BUS_VER2_COMP_GRP_3:
+	case CAM_VFE_BUS_VER2_COMP_GRP_4:
+	case CAM_VFE_BUS_VER2_COMP_GRP_5:
+		comp_grp_id = (rsrc_data->comp_grp_type -
+			CAM_VFE_BUS_VER2_COMP_GRP_0);
+
+		/* Check for Regular composite error */
+		status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0];
+
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_ERR];
+		if ((status_reg & BIT(11)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			/* Check for Regular composite error */
+			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
+			break;
+		}
+
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_COMP_OWRT];
+		/* Check for Regular composite Overwrite */
+		if ((status_reg & BIT(12)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
+			break;
+		}
+
+		/* Regular Composite SUCCESS */
+		if (status_reg & BIT(comp_grp_id + 5)) {
+			rsrc_data->irq_trigger_cnt++;
+			if (rsrc_data->irq_trigger_cnt ==
+				rsrc_data->acquire_dev_cnt) {
+				cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS0] &=
+					~BIT(comp_grp_id + 5);
+				rsrc_data->irq_trigger_cnt = 0;
+			}
+			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+
+		CAM_DBG(CAM_ISP, "status reg = 0x%x, bit index = %d rc %d",
+			status_reg, (comp_grp_id + 5), rc);
+		break;
+
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4:
+	case CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5:
+		comp_grp_id = (rsrc_data->comp_grp_type -
+			CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0);
+
+		/* Check for DUAL composite error */
+		status_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2];
+
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_ERR];
+		if ((status_reg & BIT(6)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			/* Check for DUAL composite error */
+			rc = CAM_VFE_IRQ_STATUS_ERR_COMP;
+			break;
+		}
+
+		/* Check for Dual composite Overwrite */
+		comp_err_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_DUAL_COMP_OWRT];
+		if ((status_reg & BIT(7)) &&
+			(comp_err_reg & rsrc_data->composite_mask)) {
+			rc = CAM_VFE_IRQ_STATUS_COMP_OWRT;
+			break;
+		}
+
+		/* DUAL Composite SUCCESS */
+		if (status_reg & BIT(comp_grp_id)) {
+			rsrc_data->irq_trigger_cnt++;
+			if (rsrc_data->irq_trigger_cnt ==
+				rsrc_data->acquire_dev_cnt) {
+				cam_ife_irq_regs[CAM_IFE_IRQ_BUS_REG_STATUS2] &=
+					~BIT(comp_grp_id);
+				rsrc_data->irq_trigger_cnt = 0;
+			}
+			rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+
+		break;
+	default:
+		rc = CAM_VFE_IRQ_STATUS_ERR;
+		CAM_ERR(CAM_ISP, "Invalid comp_grp_type %u",
+			rsrc_data->comp_grp_type);
+		break;
+	}
+
+	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
+		cam_vfe_bus_put_evt_payload(rsrc_data->common_data,
+			&evt_payload);
+
+	return rc;
+}
+
+static int cam_vfe_bus_init_comp_grp(uint32_t index,
+	struct cam_vfe_bus_ver2_priv    *ver2_bus_priv,
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info,
+	struct cam_isp_resource_node    *comp_grp)
+{
+	struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data = NULL;
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_comp_grp_data),
+		GFP_KERNEL);
+	if (!rsrc_data)
+		return -ENOMEM;
+
+	comp_grp->res_priv = rsrc_data;
+
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&comp_grp->list);
+
+	rsrc_data->comp_grp_type   = index;
+	rsrc_data->common_data     = &ver2_bus_priv->common_data;
+	rsrc_data->hw_regs         = &ver2_hw_info->comp_grp_reg[index];
+	rsrc_data->dual_slave_core = CAM_VFE_BUS_VER2_VFE_CORE_MAX;
+
+	if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0 &&
+		rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5)
+		list_add_tail(&comp_grp->list,
+			&ver2_bus_priv->free_dual_comp_grp);
+	else if (rsrc_data->comp_grp_type >= CAM_VFE_BUS_VER2_COMP_GRP_0
+		&& rsrc_data->comp_grp_type <= CAM_VFE_BUS_VER2_COMP_GRP_5)
+		list_add_tail(&comp_grp->list, &ver2_bus_priv->free_comp_grp);
+
+	comp_grp->start = cam_vfe_bus_start_comp_grp;
+	comp_grp->stop = cam_vfe_bus_stop_comp_grp;
+	comp_grp->top_half_handler = cam_vfe_bus_handle_comp_done_top_half;
+	comp_grp->bottom_half_handler =
+		cam_vfe_bus_handle_comp_done_bottom_half;
+	comp_grp->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_vfe_bus_deinit_comp_grp(
+	struct cam_isp_resource_node    *comp_grp)
+{
+	struct cam_vfe_bus_ver2_comp_grp_data *rsrc_data =
+		comp_grp->res_priv;
+
+	comp_grp->start = NULL;
+	comp_grp->stop = NULL;
+	comp_grp->top_half_handler = NULL;
+	comp_grp->bottom_half_handler = NULL;
+	comp_grp->hw_intf = NULL;
+
+	list_del_init(&comp_grp->list);
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+
+	comp_grp->res_priv = NULL;
+
+	if (!rsrc_data) {
+		CAM_ERR(CAM_ISP, "comp_grp_priv is NULL");
+		return -ENODEV;
+	}
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+static int cam_vfe_bus_get_secure_mode(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	bool *mode = cmd_args;
+	struct cam_isp_resource_node *res =
+		(struct cam_isp_resource_node *) priv;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data =
+		(struct cam_vfe_bus_ver2_vfe_out_data *)res->res_priv;
+
+	*mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE);
+
+	return 0;
+}
+
+static int cam_vfe_bus_acquire_vfe_out(void *bus_priv, void *acquire_args,
+	uint32_t args_size)
+{
+	int                                     rc = -ENODEV;
+	int                                     i;
+	enum cam_vfe_bus_ver2_vfe_out_type      vfe_out_res_id;
+	uint32_t                                format;
+	int                                     num_wm;
+	uint32_t                                subscribe_irq;
+	uint32_t                                client_done_mask;
+	struct cam_vfe_bus_ver2_priv           *ver2_bus_priv = bus_priv;
+	struct cam_vfe_acquire_args            *acq_args = acquire_args;
+	struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args;
+	struct cam_isp_resource_node           *rsrc_node = NULL;
+	struct cam_vfe_bus_ver2_vfe_out_data   *rsrc_data = NULL;
+	uint32_t                                secure_caps = 0, mode;
+
+	if (!bus_priv || !acquire_args) {
+		CAM_ERR(CAM_ISP, "Invalid Param");
+		return -EINVAL;
+	}
+
+	out_acquire_args = &acq_args->vfe_out;
+	format = out_acquire_args->out_port_info->format;
+
+	CAM_DBG(CAM_ISP, "Acquiring resource type 0x%x",
+		out_acquire_args->out_port_info->res_type);
+
+	vfe_out_res_id = cam_vfe_bus_get_out_res_id(
+		out_acquire_args->out_port_info->res_type);
+	if (vfe_out_res_id == CAM_VFE_BUS_VER2_VFE_OUT_MAX)
+		return -ENODEV;
+
+	num_wm = cam_vfe_bus_get_num_wm(vfe_out_res_id, format);
+	if (num_wm < 1)
+		return -EINVAL;
+
+	rsrc_node = &ver2_bus_priv->vfe_out[vfe_out_res_id];
+	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d",
+			vfe_out_res_id, rsrc_node->res_state);
+		return -EBUSY;
+	}
+
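+	/*
+	 * Secure-mode bookkeeping: all concurrently acquired outputs
+	 * that are capable of being secure must share one mode. The
+	 * first secure-capable acquire fixes the driver-wide mode and
+	 * num_sec_out refcounts it; later acquires must match or fail.
+	 */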
+	rsrc_data = rsrc_node->res_priv;
+	secure_caps = cam_vfe_bus_can_be_secure(
+		rsrc_data->out_type);
+	mode = out_acquire_args->out_port_info->secure_mode;
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (!rsrc_data->common_data->num_sec_out) {
+			rsrc_data->secure_mode = mode;
+			rsrc_data->common_data->secure_mode = mode;
+		} else {
+			if (mode == rsrc_data->common_data->secure_mode) {
+				rsrc_data->secure_mode =
+					rsrc_data->common_data->secure_mode;
+			} else {
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"Mismatch: Acquire mode[%d], drvr mode[%d]",
+					rsrc_data->common_data->secure_mode,
+					mode);
+				mutex_unlock(
+					&rsrc_data->common_data->bus_mutex);
+				return -EINVAL;
+			}
+		}
+		rsrc_data->common_data->num_sec_out++;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+	ver2_bus_priv->tasklet_info = acq_args->tasklet;
+	rsrc_data->num_wm = num_wm;
+	rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
+	rsrc_node->tasklet_info = acq_args->tasklet;
+	rsrc_node->cdm_ops = out_acquire_args->cdm_ops;
+	rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops;
+
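+	/*
+	 * A composite group is needed whenever several WMs must signal
+	 * buf-done as a unit: multi-plane formats (num_wm > 1), dual
+	 * VFE outputs, or an explicit comp_grp_id in the acquire
+	 * request. In that case the individual WMs skip their own IRQ
+	 * subscription (subscribe_irq = 0) and the group IRQ is used
+	 * instead.
+	 */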
+	/* Reserve Composite Group */
+	if (num_wm > 1 || (out_acquire_args->is_dual) ||
+		(out_acquire_args->out_port_info->comp_grp_id >
+		CAM_ISP_RES_COMP_GROUP_NONE &&
+		out_acquire_args->out_port_info->comp_grp_id <
+		CAM_ISP_RES_COMP_GROUP_ID_MAX)) {
+
+		rc = cam_vfe_bus_acquire_comp_grp(ver2_bus_priv,
+			out_acquire_args->out_port_info,
+			acq_args->tasklet,
+			out_acquire_args->ctx,
+			out_acquire_args->unique_id,
+			out_acquire_args->is_dual,
+			out_acquire_args->is_master,
+			out_acquire_args->dual_slave_core,
+			&rsrc_data->comp_grp);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"VFE%d Comp_Grp acquire fail for Out %d rc=%d",
+				rsrc_data->common_data->core_index,
+				vfe_out_res_id, rc);
+			return rc;
+		}
+
+		subscribe_irq = 0;
+	} else {
+		subscribe_irq = 1;
+	}
+
+	/* Reserve WM */
+	for (i = 0; i < num_wm; i++) {
+		rc = cam_vfe_bus_acquire_wm(ver2_bus_priv,
+			out_acquire_args->out_port_info,
+			acq_args->tasklet,
+			out_acquire_args->ctx,
+			vfe_out_res_id,
+			i,
+			subscribe_irq,
+			&rsrc_data->wm_res[i],
+			&client_done_mask,
+			out_acquire_args->is_dual);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"VFE%d WM acquire failed for Out %d rc=%d",
+				rsrc_data->common_data->core_index,
+				vfe_out_res_id, rc);
+			goto release_wm;
+		}
+
+		if (rsrc_data->comp_grp)
+			cam_vfe_bus_add_wm_to_comp_grp(rsrc_data->comp_grp,
+				client_done_mask);
+	}
+
+	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	out_acquire_args->rsrc_node = rsrc_node;
+
+	CAM_DBG(CAM_ISP, "Acquire successful");
+	return rc;
+
+release_wm:
+	for (i--; i >= 0; i--)
+		cam_vfe_bus_release_wm(ver2_bus_priv, rsrc_data->wm_res[i]);
+
+	cam_vfe_bus_release_comp_grp(ver2_bus_priv,
+		rsrc_data->comp_grp);
+
+	return rc;
+}
+
+static int cam_vfe_bus_release_vfe_out(void *bus_priv, void *release_args,
+	uint32_t args_size)
+{
+	uint32_t i;
+	struct cam_isp_resource_node          *vfe_out = NULL;
+	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
+	uint32_t                               secure_caps = 0;
+
+	if (!bus_priv || !release_args) {
+		CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
+			bus_priv, release_args);
+		return -EINVAL;
+	}
+
+	vfe_out = release_args;
+	rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
+			vfe_out->res_state);
+	}
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		cam_vfe_bus_release_wm(bus_priv, rsrc_data->wm_res[i]);
+	rsrc_data->num_wm = 0;
+
+	if (rsrc_data->comp_grp)
+		cam_vfe_bus_release_comp_grp(bus_priv, rsrc_data->comp_grp);
+	rsrc_data->comp_grp = NULL;
+
+	vfe_out->tasklet_info = NULL;
+	vfe_out->cdm_ops = NULL;
+	rsrc_data->cdm_util_ops = NULL;
+
+	secure_caps = cam_vfe_bus_can_be_secure(rsrc_data->out_type);
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (rsrc_data->secure_mode ==
+			rsrc_data->common_data->secure_mode) {
+			rsrc_data->common_data->num_sec_out--;
+			rsrc_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+		} else {
+			/*
+			 * The validity of the mode is checked while
+			 * acquiring the output port, so this point is not
+			 * expected to be reached unless there is some
+			 * corruption.
+			 */
+			CAM_ERR(CAM_ISP, "driver[%d],resource[%d] mismatch",
+				rsrc_data->common_data->secure_mode,
+				rsrc_data->secure_mode);
+		}
+
+		if (!rsrc_data->common_data->num_sec_out)
+			rsrc_data->common_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
+		vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+static int cam_vfe_bus_start_vfe_out(
+	struct cam_isp_resource_node          *vfe_out)
+{
+	int rc = 0, i;
+	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
+	struct cam_vfe_bus_ver2_common_data   *common_data = NULL;
+
+	if (!vfe_out) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+	common_data = rsrc_data->common_data;
+
+	CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
+
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
+			vfe_out->res_state);
+		return -EACCES;
+	}
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_vfe_bus_start_wm(rsrc_data->wm_res[i]);
+
+	if (rsrc_data->comp_grp)
+		rc = cam_vfe_bus_start_comp_grp(rsrc_data->comp_grp);
+
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+	return rc;
+}
+
+static int cam_vfe_bus_stop_vfe_out(
+	struct cam_isp_resource_node          *vfe_out)
+{
+	int rc = 0, i;
+	struct cam_vfe_bus_ver2_vfe_out_data  *rsrc_data = NULL;
+
+	if (!vfe_out) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
+		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "vfe_out res_state is %d", vfe_out->res_state);
+		return rc;
+	}
+
+	if (rsrc_data->comp_grp)
+		rc = cam_vfe_bus_stop_comp_grp(rsrc_data->comp_grp);
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_vfe_bus_stop_wm(rsrc_data->wm_res[i]);
+
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
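+/*
+ * vfe_out never subscribes its own IRQ; buf-done is always routed via
+ * the WM or composite-group handlers, so this top half should never be
+ * invoked and -EPERM acts as a guard.
+ */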
+static int cam_vfe_bus_handle_vfe_out_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+static int cam_vfe_bus_handle_vfe_out_done_bottom_half(
+	void                *handler_priv,
+	void                *evt_payload_priv)
+{
+	int rc = -EINVAL;
+	struct cam_isp_resource_node         *vfe_out = handler_priv;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+	/*
+	 * If this resource has Composite Group then we only handle
+	 * Composite done. We acquire Composite if number of WM > 1.
+	 * So Else case is only one individual buf_done = WM[0].
+	 */
+	if (rsrc_data->comp_grp) {
+		rc = rsrc_data->comp_grp->bottom_half_handler(
+			rsrc_data->comp_grp, evt_payload_priv);
+	} else {
+		rc = rsrc_data->wm_res[0]->bottom_half_handler(
+			rsrc_data->wm_res[0], evt_payload_priv);
+	}
+
+	CAM_DBG(CAM_ISP, "vfe_out %d rc %d", rsrc_data->out_type, rc);
+
+	return rc;
+}
+
+static int cam_vfe_bus_init_vfe_out_resource(uint32_t  index,
+	struct cam_vfe_bus_ver2_priv                  *ver2_bus_priv,
+	struct cam_vfe_bus_ver2_hw_info               *ver2_hw_info)
+{
+	struct cam_isp_resource_node         *vfe_out = NULL;
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = NULL;
+	int rc = 0;
+	int32_t vfe_out_type =
+		ver2_hw_info->vfe_out_hw_info[index].vfe_out_type;
+
+	if (vfe_out_type < 0 ||
+		vfe_out_type >= CAM_VFE_BUS_VER2_VFE_OUT_MAX) {
+		CAM_ERR(CAM_ISP, "Init VFE Out failed, Invalid type=%d",
+			vfe_out_type);
+		return -EINVAL;
+	}
+
+	vfe_out = &ver2_bus_priv->vfe_out[vfe_out_type];
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE ||
+		vfe_out->res_priv) {
+		CAM_ERR(CAM_ISP,
+			"Error: the same resource is initialized again");
+		return -EFAULT;
+	}
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver2_vfe_out_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	vfe_out->res_priv = rsrc_data;
+
+	vfe_out->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&vfe_out->list);
+
+	rsrc_data->out_type    =
+		ver2_hw_info->vfe_out_hw_info[index].vfe_out_type;
+	rsrc_data->common_data = &ver2_bus_priv->common_data;
+	rsrc_data->max_width   =
+		ver2_hw_info->vfe_out_hw_info[index].max_width;
+	rsrc_data->max_height  =
+		ver2_hw_info->vfe_out_hw_info[index].max_height;
+	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
+
+	vfe_out->start = cam_vfe_bus_start_vfe_out;
+	vfe_out->stop = cam_vfe_bus_stop_vfe_out;
+	vfe_out->top_half_handler = cam_vfe_bus_handle_vfe_out_done_top_half;
+	vfe_out->bottom_half_handler =
+		cam_vfe_bus_handle_vfe_out_done_bottom_half;
+	vfe_out->process_cmd = cam_vfe_bus_process_cmd;
+	vfe_out->hw_intf = ver2_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_vfe_bus_deinit_vfe_out_resource(
+	struct cam_isp_resource_node    *vfe_out)
+{
+	struct cam_vfe_bus_ver2_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) {
+		/*
+		 * This is not an error. It can happen if the resource is
+		 * never supported in the HW.
+		 */
+		CAM_DBG(CAM_ISP, "Resource already deinitialized");
+		return 0;
+	}
+
+	vfe_out->start = NULL;
+	vfe_out->stop = NULL;
+	vfe_out->top_half_handler = NULL;
+	vfe_out->bottom_half_handler = NULL;
+	vfe_out->hw_intf = NULL;
+
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	INIT_LIST_HEAD(&vfe_out->list);
+	vfe_out->res_priv = NULL;
+
+	if (!rsrc_data)
+		return -ENOMEM;
+	kfree(rsrc_data);
+
+	return 0;
+}
+
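+/*
+ * Bridge from the outer VFE IRQ controller into the nested bus IRQ
+ * controller, so the per-WM and composite-group handlers subscribed
+ * above get dispatched.
+ */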
+static int cam_vfe_bus_ver2_handle_irq(uint32_t    evt_id,
+	struct cam_irq_th_payload                 *th_payload)
+{
+	struct cam_vfe_bus_ver2_priv          *bus_priv;
+	int rc = 0;
+
+	bus_priv     = th_payload->handler_priv;
+	CAM_DBG(CAM_ISP, "Enter");
+	rc = cam_irq_controller_handle_irq(evt_id,
+		bus_priv->common_data.bus_irq_controller);
+	return (rc == IRQ_HANDLED) ? 0 : -EINVAL;
+}
+
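+/*
+ * A bus violation is typically persistent, so the error IRQ is masked
+ * here before the payload is queued; otherwise the same violation
+ * would retrigger this handler continuously.
+ */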
+static int cam_vfe_bus_error_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int i = 0, rc = 0;
+	struct cam_vfe_bus_ver2_priv *bus_priv =
+		th_payload->handler_priv;
+	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ");
+	for (i = 0; i < th_payload->num_registers; i++) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "vfe:%d: IRQ_Status%d: 0x%x",
+			bus_priv->common_data.core_index, i,
+			th_payload->evt_status_arr[i]);
+	}
+	cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller,
+		bus_priv->error_irq_handle);
+
+	rc  = cam_vfe_bus_get_evt_payload(&bus_priv->common_data, &evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Cannot get payload");
+		return rc;
+	}
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->core_index = bus_priv->common_data.core_index;
+	evt_payload->evt_id  = evt_id;
+	evt_payload->ctx = &bus_priv->common_data;
+	evt_payload->debug_status_0 = cam_io_r_mb(
+		bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->debug_status_0);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
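+/*
+ * The update helpers below do not touch registers directly; they
+ * append (register offset, value) pairs to reg_val_pair through
+ * CAM_VFE_ADD_REG_VAL_PAIR, with *j tracking the next free slot,
+ * presumably for a later CDM commit via cdm_util_ops.
+ */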
+static void cam_vfe_bus_update_ubwc_meta_addr(
+	uint32_t *reg_val_pair,
+	uint32_t  *j,
+	void     *regs,
+	uint64_t  image_buf)
+{
+	uint32_t  camera_hw_version;
+	struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_regs;
+	struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *ubwc_3_regs;
+	int rc = 0;
+
+	if (!regs || !reg_val_pair || !j) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to get HW version rc: %d", rc);
+		goto end;
+	} else if ((camera_hw_version < CAM_CPAS_TITAN_170_V100) ||
+		(camera_hw_version > CAM_CPAS_TITAN_175_V101)) {
+		CAM_ERR(CAM_ISP, "Invalid HW version: %d",
+			camera_hw_version);
+		goto end;
+	}
+
+	switch (camera_hw_version) {
+	case CAM_CPAS_TITAN_170_V100:
+	case CAM_CPAS_TITAN_170_V110:
+	case CAM_CPAS_TITAN_170_V120:
+		ubwc_regs =
+			(struct cam_vfe_bus_ver2_reg_offset_ubwc_client *)regs;
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->meta_addr,
+			image_buf);
+		break;
+	case CAM_CPAS_TITAN_175_V100:
+	case CAM_CPAS_TITAN_175_V101:
+		ubwc_3_regs =
+			(struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *)
+			regs;
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_3_regs->meta_addr,
+			image_buf);
+		break;
+	default:
+		break;
+	}
+end:
+	return;
+}
+
+static int cam_vfe_bus_update_ubwc_3_regs(
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data,
+	uint32_t *reg_val_pair,	uint32_t i, uint32_t *j)
+{
+	struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *ubwc_regs;
+	uint32_t ubwc_bw_limit = 0;
+	int rc = 0;
+
+	if (!wm_data || !reg_val_pair || !j) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ubwc_regs = (struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client *)
+		wm_data->hw_regs->ubwc_regs;
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		wm_data->hw_regs->packer_cfg, wm_data->packer_cfg);
+	CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	/*
+	 * tile_cfg is programmed identically for single and dual VFE;
+	 * only the debug log is skipped in the dual case.
+	 */
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->tile_cfg, wm_data->tile_cfg);
+	if (!wm_data->is_dual)
+		CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x",
+			wm_data->index, reg_val_pair[*j-1]);
+
+	if (wm_data->is_dual) {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->h_init, wm_data->offset);
+	} else {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->h_init, wm_data->h_init);
+		CAM_DBG(CAM_ISP, "WM %d h_init 0x%x",
+			wm_data->index, reg_val_pair[*j-1]);
+	}
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->v_init, wm_data->v_init);
+	CAM_DBG(CAM_ISP, "WM %d v_init 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->meta_stride, wm_data->ubwc_meta_stride);
+	CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->mode_cfg_0, wm_data->ubwc_mode_cfg_0);
+	CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_0 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->mode_cfg_1, wm_data->ubwc_mode_cfg_1);
+	CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_1 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->meta_offset, wm_data->ubwc_meta_offset);
+	CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
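+	/*
+	 * Per-format UBWC bandwidth limit. From the values below, BIT(0)
+	 * appears to be the enable bit with the limit in the bits above it;
+	 * treat that field layout as an assumption read from this code, not
+	 * documented register semantics.
+	 */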
+	switch (wm_data->format) {
+	case CAM_FORMAT_UBWC_TP10:
+		ubwc_bw_limit = (0x8 << 1) | BIT(0);
+		break;
+	case CAM_FORMAT_UBWC_NV12_4R:
+		ubwc_bw_limit = (0xB << 1) | BIT(0);
+		break;
+	default:
+		ubwc_bw_limit = 0;
+		break;
+	}
+
+	if (ubwc_bw_limit) {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->bw_limit, ubwc_bw_limit);
+		CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x",
+			wm_data->index, ubwc_bw_limit);
+	}
+
+end:
+	return rc;
+}
+
+static int cam_vfe_bus_update_ubwc_legacy_regs(
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data,
+	uint32_t camera_hw_version, uint32_t *reg_val_pair,
+	uint32_t i, uint32_t *j)
+{
+	struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_regs;
+	uint32_t ubwc_bw_limit = 0;
+	int rc = 0;
+
+	if (!wm_data || !reg_val_pair || !j) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ubwc_regs = (struct cam_vfe_bus_ver2_reg_offset_ubwc_client *)
+		wm_data->hw_regs->ubwc_regs;
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		wm_data->hw_regs->packer_cfg, wm_data->packer_cfg);
+	CAM_DBG(CAM_ISP, "WM %d packer cfg 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	/*
+	 * tile_cfg is programmed identically for single and dual VFE;
+	 * only the debug log is skipped in the dual case.
+	 */
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->tile_cfg, wm_data->tile_cfg);
+	if (!wm_data->is_dual)
+		CAM_DBG(CAM_ISP, "WM %d tile cfg 0x%x",
+			wm_data->index, reg_val_pair[*j-1]);
+
+	if (wm_data->is_dual) {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->h_init, wm_data->offset);
+	} else {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->h_init, wm_data->h_init);
+		CAM_DBG(CAM_ISP, "WM %d h_init 0x%x",
+			wm_data->index, reg_val_pair[*j-1]);
+	}
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->v_init, wm_data->v_init);
+	CAM_DBG(CAM_ISP, "WM %d v_init 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->meta_stride, wm_data->ubwc_meta_stride);
+	CAM_DBG(CAM_ISP, "WM %d meta stride 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->mode_cfg_0, wm_data->ubwc_mode_cfg_0);
+	CAM_DBG(CAM_ISP, "WM %d ubwc_mode_cfg_0 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->meta_offset, wm_data->ubwc_meta_offset);
+	CAM_DBG(CAM_ISP, "WM %d ubwc meta offset 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
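+	/*
+	 * Only TITAN 170 V110 applies a UBWC bandwidth limit on this legacy
+	 * path; note the limit value is not shifted past BIT(0) here, unlike
+	 * the UBWC v3 encoding.
+	 */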
+	if (camera_hw_version == CAM_CPAS_TITAN_170_V110) {
+		switch (wm_data->format) {
+		case CAM_FORMAT_UBWC_TP10:
+			ubwc_bw_limit = 0x8 | BIT(0);
+			break;
+		case CAM_FORMAT_UBWC_NV12_4R:
+			ubwc_bw_limit = 0xB | BIT(0);
+			break;
+		default:
+			ubwc_bw_limit = 0;
+			break;
+		}
+	}
+
+	if (ubwc_bw_limit) {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->bw_limit, ubwc_bw_limit);
+		CAM_DBG(CAM_ISP, "WM %d ubwc bw limit 0x%x",
+			wm_data->index, ubwc_bw_limit);
+	}
+
+end:
+	return rc;
+}
+
+static int cam_vfe_bus_update_ubwc_regs(
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data,
+	uint32_t *reg_val_pair,	uint32_t i, uint32_t *j)
+{
+	uint32_t camera_hw_version;
+	int rc = 0;
+
+	if (!wm_data || !reg_val_pair || !j) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc =  cam_cpas_get_cpas_hw_version(&camera_hw_version);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Failed to get HW version rc: %d", rc);
+		goto end;
+	} else if ((camera_hw_version <= CAM_CPAS_TITAN_NONE) ||
+		(camera_hw_version >= CAM_CPAS_TITAN_MAX)) {
+		CAM_ERR(CAM_ISP, "Invalid HW version: %d",
+			camera_hw_version);
+		rc = -EINVAL;
+		goto end;
+	}
+	switch (camera_hw_version) {
+	case CAM_CPAS_TITAN_170_V100:
+	case CAM_CPAS_TITAN_170_V110:
+	case CAM_CPAS_TITAN_170_V120:
+		rc = cam_vfe_bus_update_ubwc_legacy_regs(
+			wm_data, camera_hw_version, reg_val_pair, i, j);
+		break;
+	case CAM_CPAS_TITAN_175_V100:
+	case CAM_CPAS_TITAN_175_V101:
+		rc = cam_vfe_bus_update_ubwc_3_regs(
+			wm_data, reg_val_pair, i, j);
+		break;
+	default:
+		break;
+	}
+
+	if (rc)
+		CAM_ERR(CAM_ISP, "Failed to update ubwc regs rc:%d", rc);
+
+end:
+	return rc;
+}
+
+static int cam_vfe_bus_update_wm(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver2_priv             *bus_priv;
+	struct cam_isp_hw_get_cmd_update         *update_buf;
+	struct cam_buf_io_cfg                    *io_cfg;
+	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+	struct cam_vfe_bus_ver2_reg_offset_ubwc_client *ubwc_client = NULL;
+	uint32_t *reg_val_pair;
+	uint32_t  i, j, k, size = 0;
+	uint32_t  frame_inc = 0, val;
+	uint32_t loop_size = 0;
+
+	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
+	update_buf =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+		update_buf->res->res_priv;
+
+	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	if (update_buf->wm_update->num_buf != vfe_out_data->num_wm) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Invalid number buffers:%d required:%d",
+			update_buf->wm_update->num_buf, vfe_out_data->num_wm);
+		return -EINVAL;
+	}
+
+	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+	io_cfg = update_buf->wm_update->io_cfg;
+
+	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+			CAM_ERR(CAM_ISP,
+				"reg_val_pair %d exceeds the array limit %zu",
+				j, MAX_REG_VAL_PAIR_SIZE);
+			return -ENOMEM;
+		}
+
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+		ubwc_client = wm_data->hw_regs->ubwc_regs;
+		/* update width register */
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->buffer_width_cfg,
+			wm_data->width);
+		CAM_DBG(CAM_ISP, "WM %d image width 0x%x",
+			wm_data->index, reg_val_pair[j-1]);
+
+		/* For initial configuration program all bus registers */
+		val = io_cfg->planes[i].plane_stride;
+		CAM_DBG(CAM_ISP, "before stride %d", val);
+		val = ALIGNUP(val, 16);
+		if (val != io_cfg->planes[i].plane_stride &&
+			val != wm_data->stride)
+			CAM_WARN(CAM_ISP,
+				"Warning stride %u expected %u",
+				io_cfg->planes[i].plane_stride,
+				val);
+
+		if ((wm_data->stride != val ||
+			!wm_data->init_cfg_done) && (wm_data->index >= 3)) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->stride,
+				io_cfg->planes[i].plane_stride);
+			wm_data->stride = val;
+			CAM_DBG(CAM_ISP, "WM %d image stride 0x%x",
+				wm_data->index, reg_val_pair[j-1]);
+		}
+
+		if (wm_data->en_ubwc) {
+			if (!wm_data->hw_regs->ubwc_regs) {
+				CAM_ERR(CAM_ISP,
+					"No UBWC register to configure.");
+				return -EINVAL;
+			}
+			if (wm_data->ubwc_updated) {
+				wm_data->ubwc_updated = false;
+				cam_vfe_bus_update_ubwc_regs(
+					wm_data, reg_val_pair, i, &j);
+			}
+
+			/* UBWC meta address */
+			cam_vfe_bus_update_ubwc_meta_addr(
+				reg_val_pair, &j,
+				wm_data->hw_regs->ubwc_regs,
+				update_buf->wm_update->image_buf[i]);
+			CAM_DBG(CAM_ISP, "WM %d ubwc meta addr 0x%llx",
+				wm_data->index,
+				update_buf->wm_update->image_buf[i]);
+		}
+
+		if (wm_data->en_ubwc) {
+			frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride *
+			    io_cfg->planes[i].slice_height, 4096);
+			frame_inc += io_cfg->planes[i].meta_size;
+			CAM_DBG(CAM_ISP,
+				"WM %d frm %d: ht: %d stride %d meta: %d",
+				wm_data->index, frame_inc,
+				io_cfg->planes[i].slice_height,
+				io_cfg->planes[i].plane_stride,
+				io_cfg->planes[i].meta_size);
+		} else {
+			frame_inc = io_cfg->planes[i].plane_stride *
+				io_cfg->planes[i].slice_height;
+		}
+
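+		/*
+		 * WM clients 0-2 (by the index checks in this file,
+		 * presumably the RDI writemasters) are programmed with one
+		 * image address per IRQ-subsampled frame; all other clients
+		 * take a single address.
+		 */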
+		if (wm_data->index < 3)
+			loop_size = wm_data->irq_subsample_period + 1;
+		else
+			loop_size = 1;
+
+		/* WM Image address */
+		for (k = 0; k < loop_size; k++) {
+			if (wm_data->en_ubwc)
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->image_addr,
+					update_buf->wm_update->image_buf[i] +
+					io_cfg->planes[i].meta_size +
+					k * frame_inc);
+			else
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->image_addr,
+					update_buf->wm_update->image_buf[i] +
+					wm_data->offset + k * frame_inc);
+			CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
+				wm_data->index, reg_val_pair[j-1]);
+		}
+
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->frame_inc, frame_inc);
+		CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
+			wm_data->index, reg_val_pair[j-1]);
+
+
+		/* enable the WM */
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->cfg,
+			wm_data->en_cfg);
+
+		/* set initial configuration done */
+		if (!wm_data->init_cfg_done)
+			wm_data->init_cfg_done = true;
+	}
+
+	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > update_buf->cmd.size) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Buf size:%d insufficient, expected size:%d",
+			update_buf->cmd.size, size * 4);
+		return -ENOMEM;
+	}
+
+	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
+		update_buf->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	update_buf->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+static int cam_vfe_bus_update_hfr(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver2_priv             *bus_priv;
+	struct cam_isp_hw_get_cmd_update         *update_hfr;
+	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+	struct cam_isp_port_hfr_config           *hfr_cfg = NULL;
+	uint32_t *reg_val_pair;
+	uint32_t  i, j, size = 0;
+
+	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
+	update_hfr =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+		update_hfr->res->res_priv;
+
+	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+	hfr_cfg = update_hfr->hfr_update;
+
+	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+			CAM_ERR(CAM_ISP,
+				"reg_val_pair %d exceeds the array limit %zu",
+				j, MAX_REG_VAL_PAIR_SIZE);
+			return -ENOMEM;
+		}
+
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+
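+		/*
+		 * The check below assumes WM 0-2 are the RDI clients, whose
+		 * IRQ subsample period field only supports values up to 3.
+		 */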
+		if (wm_data->index <= 2 && hfr_cfg->subsample_period > 3) {
+			CAM_ERR(CAM_ISP,
+				"RDI doesn't support irq subsample period %d",
+				hfr_cfg->subsample_period);
+			return -EINVAL;
+		}
+
+		if ((wm_data->framedrop_pattern !=
+			hfr_cfg->framedrop_pattern) ||
+			!wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_pattern,
+				hfr_cfg->framedrop_pattern);
+			wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern;
+			CAM_DBG(CAM_ISP, "WM %d framedrop pattern 0x%x",
+				wm_data->index, wm_data->framedrop_pattern);
+		}
+
+		if (wm_data->framedrop_period != hfr_cfg->framedrop_period ||
+			!wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_period,
+				hfr_cfg->framedrop_period);
+			wm_data->framedrop_period = hfr_cfg->framedrop_period;
+			CAM_DBG(CAM_ISP, "WM %d framedrop period 0x%x",
+				wm_data->index, wm_data->framedrop_period);
+		}
+
+		if (wm_data->irq_subsample_period != hfr_cfg->subsample_period
+			|| !wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_period,
+				hfr_cfg->subsample_period);
+			wm_data->irq_subsample_period =
+				hfr_cfg->subsample_period;
+			CAM_DBG(CAM_ISP, "WM %d irq subsample period 0x%x",
+				wm_data->index, wm_data->irq_subsample_period);
+		}
+
+		if (wm_data->irq_subsample_pattern != hfr_cfg->subsample_pattern
+			|| !wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_pattern,
+				hfr_cfg->subsample_pattern);
+			wm_data->irq_subsample_pattern =
+				hfr_cfg->subsample_pattern;
+			CAM_DBG(CAM_ISP, "WM %d irq subsample pattern 0x%x",
+				wm_data->index, wm_data->irq_subsample_pattern);
+		}
+
+		/* set initial configuration done */
+		if (!wm_data->hfr_cfg_done)
+			wm_data->hfr_cfg_done = true;
+	}
+
+	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > update_hfr->cmd.size) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Buf size:%d insufficient, expected size:%d",
+			update_hfr->cmd.size, size * 4);
+		return -ENOMEM;
+	}
+
+	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
+		update_hfr->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	update_hfr->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+static int cam_vfe_bus_update_ubwc_config(void *cmd_args)
+{
+	struct cam_isp_hw_get_cmd_update         *update_ubwc;
+	struct cam_vfe_bus_ver2_vfe_out_data     *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data *wm_data = NULL;
+	struct cam_ubwc_plane_cfg_v1             *ubwc_plane_cfg = NULL;
+	uint32_t                                  i;
+	int                                       rc = 0;
+
+	if (!cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	update_ubwc =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+		update_ubwc->res->res_priv;
+
+	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid data");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ubwc_plane_cfg = update_ubwc->ubwc_update;
+
+	for (i = 0; i < vfe_out_data->num_wm; i++) {
+
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+		if (i > 0)
+			ubwc_plane_cfg++;
+
+		if (!wm_data->hw_regs->ubwc_regs) {
+			CAM_ERR(CAM_ISP,
+				"No UBWC register to configure.");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (!wm_data->en_ubwc) {
+			CAM_ERR(CAM_ISP, "UBWC Disabled");
+			rc = -EINVAL;
+			goto end;
+		}
+
+		if (wm_data->packer_cfg !=
+			ubwc_plane_cfg->packer_config ||
+			!wm_data->init_cfg_done) {
+			wm_data->packer_cfg = ubwc_plane_cfg->packer_config;
+			wm_data->ubwc_updated = true;
+		}
+
+		if ((!wm_data->is_dual) && ((wm_data->tile_cfg !=
+			ubwc_plane_cfg->tile_config)
+			|| !wm_data->init_cfg_done)) {
+			wm_data->tile_cfg = ubwc_plane_cfg->tile_config;
+			wm_data->ubwc_updated = true;
+		}
+
+		if ((!wm_data->is_dual) && ((wm_data->h_init !=
+			ubwc_plane_cfg->h_init) ||
+			!wm_data->init_cfg_done)) {
+			wm_data->h_init = ubwc_plane_cfg->h_init;
+			wm_data->ubwc_updated = true;
+		}
+
+		if (wm_data->v_init != ubwc_plane_cfg->v_init ||
+			!wm_data->init_cfg_done) {
+			wm_data->v_init = ubwc_plane_cfg->v_init;
+			wm_data->ubwc_updated = true;
+		}
+
+		if (wm_data->ubwc_meta_stride !=
+			ubwc_plane_cfg->meta_stride ||
+			!wm_data->init_cfg_done) {
+			wm_data->ubwc_meta_stride = ubwc_plane_cfg->meta_stride;
+			wm_data->ubwc_updated = true;
+		}
+
+		if (wm_data->ubwc_mode_cfg_0 !=
+			ubwc_plane_cfg->mode_config_0 ||
+			!wm_data->init_cfg_done) {
+			wm_data->ubwc_mode_cfg_0 =
+				ubwc_plane_cfg->mode_config_0;
+			wm_data->ubwc_updated = true;
+		}
+
+		if (wm_data->ubwc_mode_cfg_1 !=
+			ubwc_plane_cfg->mode_config_1 ||
+			!wm_data->init_cfg_done) {
+			wm_data->ubwc_mode_cfg_1 =
+				ubwc_plane_cfg->mode_config_1;
+			wm_data->ubwc_updated = true;
+		}
+
+		if (wm_data->ubwc_meta_offset !=
+			ubwc_plane_cfg->meta_offset ||
+			!wm_data->init_cfg_done) {
+			wm_data->ubwc_meta_offset = ubwc_plane_cfg->meta_offset;
+			wm_data->ubwc_updated = true;
+		}
+	}
+
+end:
+	return rc;
+}
+
+
+static int cam_vfe_bus_update_stripe_cfg(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver2_priv                *bus_priv;
+	struct cam_isp_hw_dual_isp_update_args      *stripe_args;
+	struct cam_vfe_bus_ver2_vfe_out_data        *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver2_wm_resource_data    *wm_data = NULL;
+	struct cam_isp_dual_stripe_config           *stripe_config;
+	uint32_t outport_id, ports_plane_idx, i;
+
+	bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
+	stripe_args = (struct cam_isp_hw_dual_isp_update_args *)cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver2_vfe_out_data *)
+		stripe_args->res->res_priv;
+
+	if (!vfe_out_data) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	outport_id = stripe_args->res->res_id & 0xFF;
+	if (stripe_args->res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+		stripe_args->res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+		return 0;
+
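+	/*
+	 * dual_cfg->stripes is laid out as [split_id][out_port][plane];
+	 * compute the base index for this split and out port, then step
+	 * through one stripe entry per WM.
+	 */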
+	ports_plane_idx = (stripe_args->split_id *
+	(stripe_args->dual_cfg->num_ports * CAM_PACKET_MAX_PLANES)) +
+	(outport_id * CAM_PACKET_MAX_PLANES);
+	for (i = 0; i < vfe_out_data->num_wm; i++) {
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+		stripe_config = (struct cam_isp_dual_stripe_config  *)
+			&stripe_args->dual_cfg->stripes[ports_plane_idx + i];
+		wm_data->width = stripe_config->width;
+		wm_data->offset = stripe_config->offset;
+		wm_data->tile_cfg = stripe_config->tileconfig;
+		CAM_DBG(CAM_ISP, "id:%x wm:%d width:0x%x offset:%x tilecfg:%x",
+			stripe_args->res->res_id, i, wm_data->width,
+			wm_data->offset, wm_data->tile_cfg);
+	}
+
+	return 0;
+}
+
+static int cam_vfe_bus_start_hw(void *hw_priv,
+	void *start_hw_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_start_vfe_out(hw_priv);
+}
+
+static int cam_vfe_bus_stop_hw(void *hw_priv,
+	void *stop_hw_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_stop_vfe_out(hw_priv);
+}
+
+static int cam_vfe_bus_init_hw(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver2_priv    *bus_priv = hw_priv;
+	uint32_t                         top_irq_reg_mask[2] = {0};
+
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
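+	/*
+	 * Bit 9 of the top IRQ status register aggregates all bus write
+	 * IRQs (as wired up here); individual bus IRQ causes are then
+	 * demuxed by the bus-local IRQ controller subscribed below.
+	 */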
+	top_irq_reg_mask[0] = (1 << 9);
+
+	bus_priv->irq_handle = cam_irq_controller_subscribe_irq(
+		bus_priv->common_data.vfe_irq_controller,
+		CAM_IRQ_PRIORITY_2,
+		top_irq_reg_mask,
+		bus_priv,
+		cam_vfe_bus_ver2_handle_irq,
+		NULL,
+		NULL,
+		NULL);
+
+	if (bus_priv->irq_handle <= 0) {
+		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		return -EFAULT;
+	}
+
+	bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq(
+		bus_priv->common_data.bus_irq_controller,
+		CAM_IRQ_PRIORITY_0,
+		bus_error_irq_mask,
+		bus_priv,
+		cam_vfe_bus_error_irq_top_half,
+		cam_vfe_bus_err_bottom_half,
+		bus_priv->tasklet_info,
+		&tasklet_bh_api);
+
+	if (bus_priv->error_irq_handle <= 0) {
+		CAM_ERR(CAM_ISP, "Failed to subscribe BUS error IRQ");
+		return -EFAULT;
+	}
+
+	/* Set debug registers */
+	cam_io_w_mb(CAM_VFE_BUS_SET_DEBUG_REG, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->debug_status_cfg);
+
+	/* BUS_WR_INPUT_IF_ADDR_SYNC_FRAME_HEADER */
+	cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->addr_sync_frame_hdr);
+
+	/* no clock gating at bus input */
+	cam_io_w_mb(0xFFFFF, bus_priv->common_data.mem_base + 0x0000200C);
+
+	/* BUS_WR_TEST_BUS_CTRL */
+	cam_io_w_mb(0x0, bus_priv->common_data.mem_base + 0x0000211C);
+
+	/* Program the address-sync "no sync" register with its default value */
+	cam_io_w_mb(CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL,
+		bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->addr_sync_no_sync);
+
+	return 0;
+}
+
+static int cam_vfe_bus_deinit_hw(void *hw_priv,
+	void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver2_priv    *bus_priv = hw_priv;
+	int                              rc = 0;
+
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "Error: Invalid args");
+		return -EINVAL;
+	}
+
+	if (bus_priv->error_irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			bus_priv->common_data.bus_irq_controller,
+			bus_priv->error_irq_handle);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"Failed to unsubscribe error irq rc=%d", rc);
+
+		bus_priv->error_irq_handle = 0;
+	}
+
+	if (bus_priv->irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			bus_priv->common_data.vfe_irq_controller,
+			bus_priv->irq_handle);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"Failed to unsubscribe irq rc=%d", rc);
+
+		bus_priv->irq_handle = 0;
+	}
+
+	return rc;
+}
+
+static int __cam_vfe_bus_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_process_cmd(priv, cmd_type, cmd_args, arg_size);
+}
+
+static int cam_vfe_bus_process_cmd(
+	struct cam_isp_resource_node *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+	struct cam_vfe_bus_ver2_priv		 *bus_priv;
+
+	if (!priv || !cmd_args) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+		rc = cam_vfe_bus_update_wm(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+		rc = cam_vfe_bus_update_hfr(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_SECURE_MODE:
+		rc = cam_vfe_bus_get_secure_mode(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
+		rc = cam_vfe_bus_update_stripe_cfg(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
+		bus_priv = (struct cam_vfe_bus_ver2_priv  *) priv;
+		if (bus_priv->error_irq_handle) {
+			CAM_DBG(CAM_ISP, "Mask off bus error irq handler");
+			rc = cam_irq_controller_unsubscribe_irq(
+				bus_priv->common_data.bus_irq_controller,
+				bus_priv->error_irq_handle);
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"Failed to unsubscribe error irq rc=%d",
+					rc);
+
+			bus_priv->error_irq_handle = 0;
+		}
+		break;
+	case CAM_ISP_HW_CMD_UBWC_UPDATE:
+		rc = cam_vfe_bus_update_ubwc_config(cmd_args);
+		break;
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid camif process command:%d",
+			cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+int cam_vfe_bus_ver2_init(
+	struct cam_hw_soc_info               *soc_info,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus)
+{
+	int i, rc = 0;
+	struct cam_vfe_bus_ver2_priv    *bus_priv = NULL;
+	struct cam_vfe_bus              *vfe_bus_local;
+	struct cam_vfe_bus_ver2_hw_info *ver2_hw_info = bus_hw_info;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) {
+		CAM_ERR(CAM_ISP,
+			"Inval_prms soc_info:%pK hw_intf:%pK hw_info%pK",
+			soc_info, hw_intf, bus_hw_info);
+		CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	vfe_bus_local = kzalloc(sizeof(struct cam_vfe_bus), GFP_KERNEL);
+	if (!vfe_bus_local) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	bus_priv = kzalloc(sizeof(struct cam_vfe_bus_ver2_priv),
+		GFP_KERNEL);
+	if (!bus_priv) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv");
+		rc = -ENOMEM;
+		goto free_bus_local;
+	}
+	vfe_bus_local->bus_priv = bus_priv;
+
+	bus_priv->num_client                     = ver2_hw_info->num_client;
+	bus_priv->num_out                        = ver2_hw_info->num_out;
+	bus_priv->common_data.num_sec_out        = 0;
+	bus_priv->common_data.secure_mode        = CAM_SECURE_MODE_NON_SECURE;
+	bus_priv->common_data.core_index         = soc_info->index;
+	bus_priv->common_data.mem_base           =
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
+	bus_priv->common_data.hw_intf            = hw_intf;
+	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
+	bus_priv->common_data.common_reg         = &ver2_hw_info->common_reg;
+	bus_priv->common_data.addr_no_sync       =
+		CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL;
+
+	mutex_init(&bus_priv->common_data.bus_mutex);
+
+	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
+		&ver2_hw_info->common_reg.irq_reg_info,
+		&bus_priv->common_data.bus_irq_controller);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "cam_irq_controller_init failed");
+		goto free_bus_priv;
+	}
+
+	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+	for (i = 0; i < bus_priv->num_client; i++) {
+		rc = cam_vfe_bus_init_wm_resource(i, bus_priv, bus_hw_info,
+			&bus_priv->bus_client[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc);
+			goto deinit_wm;
+		}
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
+		rc = cam_vfe_bus_init_comp_grp(i, bus_priv, bus_hw_info,
+			&bus_priv->comp_grp[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
+			goto deinit_comp_grp;
+		}
+	}
+
+	for (i = 0; i < bus_priv->num_out; i++) {
+		rc = cam_vfe_bus_init_vfe_out_resource(i, bus_priv,
+			bus_hw_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init VFE Out failed rc=%d", rc);
+			goto deinit_vfe_out;
+		}
+	}
+
+	spin_lock_init(&bus_priv->common_data.spin_lock);
+	INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+	for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++) {
+		INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+		list_add_tail(&bus_priv->common_data.evt_payload[i].list,
+			&bus_priv->common_data.free_payload_list);
+	}
+
+	vfe_bus_local->hw_ops.reserve      = cam_vfe_bus_acquire_vfe_out;
+	vfe_bus_local->hw_ops.release      = cam_vfe_bus_release_vfe_out;
+	vfe_bus_local->hw_ops.start        = cam_vfe_bus_start_hw;
+	vfe_bus_local->hw_ops.stop         = cam_vfe_bus_stop_hw;
+	vfe_bus_local->hw_ops.init         = cam_vfe_bus_init_hw;
+	vfe_bus_local->hw_ops.deinit       = cam_vfe_bus_deinit_hw;
+	vfe_bus_local->top_half_handler    = cam_vfe_bus_ver2_handle_irq;
+	vfe_bus_local->bottom_half_handler = NULL;
+	vfe_bus_local->hw_ops.process_cmd  = __cam_vfe_bus_process_cmd;
+
+	*vfe_bus = vfe_bus_local;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+
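+/*
+ * Error unwind: the labels below intentionally fall through so a failure
+ * at any init stage also releases all earlier stages; the "i < 0" guards
+ * reset the loop index when a label is entered via fall-through.
+ */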
+deinit_vfe_out:
+	if (i < 0)
+		i = CAM_VFE_BUS_VER2_VFE_OUT_MAX;
+	for (--i; i >= 0; i--)
+		cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+
+deinit_comp_grp:
+	if (i < 0)
+		i = CAM_VFE_BUS_VER2_COMP_GRP_MAX;
+	for (--i; i >= 0; i--)
+		cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+
+deinit_wm:
+	if (i < 0)
+		i = bus_priv->num_client;
+	for (--i; i >= 0; i--)
+		cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+
+free_bus_priv:
+	kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+	kfree(vfe_bus_local);
+
+end:
+	return rc;
+}
+
+int cam_vfe_bus_ver2_deinit(
+	struct cam_vfe_bus                  **vfe_bus)
+{
+	int i, rc = 0;
+	struct cam_vfe_bus_ver2_priv    *bus_priv = NULL;
+	struct cam_vfe_bus              *vfe_bus_local;
+
+	if (!vfe_bus || !*vfe_bus) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+	vfe_bus_local = *vfe_bus;
+
+	bus_priv = vfe_bus_local->bus_priv;
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "bus_priv is NULL");
+		rc = -ENODEV;
+		goto free_bus_local;
+	}
+
+	INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+	for (i = 0; i < CAM_VFE_BUS_VER2_PAYLOAD_MAX; i++)
+		INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+
+	for (i = 0; i < bus_priv->num_client; i++) {
+		rc = cam_vfe_bus_deinit_wm_resource(&bus_priv->bus_client[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit WM failed rc=%d", rc);
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER2_COMP_GRP_MAX; i++) {
+		rc = cam_vfe_bus_deinit_comp_grp(&bus_priv->comp_grp[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit Comp Grp failed rc=%d", rc);
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER2_VFE_OUT_MAX; i++) {
+		rc = cam_vfe_bus_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit VFE Out failed rc=%d", rc);
+	}
+
+	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->free_dual_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+	rc = cam_irq_controller_deinit(
+		&bus_priv->common_data.bus_irq_controller);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Deinit IRQ Controller failed rc=%d", rc);
+
+	mutex_destroy(&bus_priv->common_data.bus_mutex);
+	kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+	kfree(vfe_bus_local);
+
+	*vfe_bus = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
new file mode 100644
index 0000000..1ced897
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_BUS_VER2_H_
+#define _CAM_VFE_BUS_VER2_H_
+
+#include "cam_irq_controller.h"
+#include "cam_vfe_bus.h"
+
+#define CAM_VFE_BUS_VER2_MAX_CLIENTS 24
+
+enum cam_vfe_bus_ver2_vfe_core_id {
+	CAM_VFE_BUS_VER2_VFE_CORE_0,
+	CAM_VFE_BUS_VER2_VFE_CORE_1,
+	CAM_VFE_BUS_VER2_VFE_CORE_MAX,
+};
+
+enum cam_vfe_bus_ver2_comp_grp_type {
+	CAM_VFE_BUS_VER2_COMP_GRP_0,
+	CAM_VFE_BUS_VER2_COMP_GRP_1,
+	CAM_VFE_BUS_VER2_COMP_GRP_2,
+	CAM_VFE_BUS_VER2_COMP_GRP_3,
+	CAM_VFE_BUS_VER2_COMP_GRP_4,
+	CAM_VFE_BUS_VER2_COMP_GRP_5,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_0,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_1,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_2,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_3,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_4,
+	CAM_VFE_BUS_VER2_COMP_GRP_DUAL_5,
+	CAM_VFE_BUS_VER2_COMP_GRP_MAX,
+};
+
+enum cam_vfe_bus_ver2_vfe_out_type {
+	CAM_VFE_BUS_VER2_VFE_OUT_RDI0,
+	CAM_VFE_BUS_VER2_VFE_OUT_RDI1,
+	CAM_VFE_BUS_VER2_VFE_OUT_RDI2,
+	CAM_VFE_BUS_VER2_VFE_OUT_RDI3,
+	CAM_VFE_BUS_VER2_VFE_OUT_FULL,
+	CAM_VFE_BUS_VER2_VFE_OUT_DS4,
+	CAM_VFE_BUS_VER2_VFE_OUT_DS16,
+	CAM_VFE_BUS_VER2_VFE_OUT_RAW_DUMP,
+	CAM_VFE_BUS_VER2_VFE_OUT_FD,
+	CAM_VFE_BUS_VER2_VFE_OUT_PDAF,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BE,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_HDR_BHIST,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_TL_BG,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_BF,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_AWB_BG,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_BHIST,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_RS,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_CS,
+	CAM_VFE_BUS_VER2_VFE_OUT_STATS_IHIST,
+	CAM_VFE_BUS_VER2_VFE_OUT_FULL_DISP,
+	CAM_VFE_BUS_VER2_VFE_OUT_DS4_DISP,
+	CAM_VFE_BUS_VER2_VFE_OUT_DS16_DISP,
+	CAM_VFE_BUS_VER2_VFE_OUT_2PD,
+	CAM_VFE_BUS_VER2_VFE_OUT_MAX,
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_common:
+ *
+ * @Brief:        Common registers across all BUS Clients
+ */
+struct cam_vfe_bus_ver2_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_ovd;
+	uint32_t pwr_iso_cfg;
+	uint32_t dual_master_comp_cfg;
+	struct cam_irq_controller_reg_info irq_reg_info;
+	uint32_t comp_error_status;
+	uint32_t comp_ovrwr_status;
+	uint32_t dual_comp_error_status;
+	uint32_t dual_comp_ovrwr_status;
+	uint32_t addr_sync_cfg;
+	uint32_t addr_sync_frame_hdr;
+	uint32_t addr_sync_no_sync;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_ubwc_client:
+ *
+ * @Brief:        UBWC register offsets for BUS Clients
+ */
+struct cam_vfe_bus_ver2_reg_offset_ubwc_client {
+	uint32_t tile_cfg;
+	uint32_t h_init;
+	uint32_t v_init;
+	uint32_t meta_addr;
+	uint32_t meta_offset;
+	uint32_t meta_stride;
+	uint32_t mode_cfg_0;
+	uint32_t bw_limit;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client:
+ *
+ * @Brief:        UBWC version 3 register offsets for BUS Clients
+ */
+struct cam_vfe_bus_ver2_reg_offset_ubwc_3_client {
+	uint32_t tile_cfg;
+	uint32_t h_init;
+	uint32_t v_init;
+	uint32_t meta_addr;
+	uint32_t meta_offset;
+	uint32_t meta_stride;
+	uint32_t mode_cfg_0;
+	uint32_t mode_cfg_1;
+	uint32_t bw_limit;
+};
+
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_bus_client:
+ *
+ * @Brief:        Register offsets for BUS Clients
+ */
+struct cam_vfe_bus_ver2_reg_offset_bus_client {
+	uint32_t status0;
+	uint32_t status1;
+	uint32_t cfg;
+	uint32_t header_addr;
+	uint32_t header_cfg;
+	uint32_t image_addr;
+	uint32_t image_addr_offset;
+	uint32_t buffer_width_cfg;
+	uint32_t buffer_height_cfg;
+	uint32_t packer_cfg;
+	uint32_t stride;
+	uint32_t irq_subsample_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t framedrop_period;
+	uint32_t framedrop_pattern;
+	uint32_t frame_inc;
+	uint32_t burst_limit;
+	void    *ubwc_regs;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_reg_offset_comp_grp:
+ *
+ * @Brief:        Register offsets for Composite Group registers
+ * comp_mask:     Comp group register address
+ * addr_sync_mask:Address sync group register address
+ */
+struct cam_vfe_bus_ver2_reg_offset_comp_grp {
+	uint32_t                            comp_mask;
+	uint32_t                            addr_sync_mask;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_vfe_out_hw_info:
+ *
+ * @Brief:        HW capability of VFE Bus Client
+ */
+struct cam_vfe_bus_ver2_vfe_out_hw_info {
+	enum cam_vfe_bus_ver2_vfe_out_type  vfe_out_type;
+	uint32_t                            max_width;
+	uint32_t                            max_height;
+};
+
+/*
+ * struct cam_vfe_bus_ver2_hw_info:
+ *
+ * @Brief:            HW register info for entire Bus
+ *
+ * @common_reg:       Common register details
+ * @num_client:       Number of bus write client resources
+ * @bus_client_reg:   Bus client register info
+ * @comp_grp_reg:     Composite group register info
+ * @num_out:          Number of VFE output resources
+ * @vfe_out_hw_info:  VFE output capability
+ */
+struct cam_vfe_bus_ver2_hw_info {
+	struct cam_vfe_bus_ver2_reg_offset_common common_reg;
+	uint32_t num_client;
+	struct cam_vfe_bus_ver2_reg_offset_bus_client
+		bus_client_reg[CAM_VFE_BUS_VER2_MAX_CLIENTS];
+	struct cam_vfe_bus_ver2_reg_offset_comp_grp
+		comp_grp_reg[CAM_VFE_BUS_VER2_COMP_GRP_MAX];
+	uint32_t num_out;
+	struct cam_vfe_bus_ver2_vfe_out_hw_info
+		vfe_out_hw_info[CAM_VFE_BUS_VER2_VFE_OUT_MAX];
+};
+
+/*
+ * cam_vfe_bus_ver2_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @soc_info:                Soc Information for the associated HW
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller:      VFE IRQ Controller to use for subscribing to Top
+ *                           level IRQs
+ * @vfe_bus:                 Pointer to vfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_bus_ver2_init(
+	struct cam_hw_soc_info               *soc_info,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus);
+
+/*
+ * cam_vfe_bus_ver2_deinit()
+ *
+ * @Brief:                   Deinitialize Bus layer
+ *
+ * @vfe_bus:                 Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_bus_ver2_deinit(struct cam_vfe_bus     **vfe_bus);
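+
+/*
+ * Illustrative usage only (the surrounding object names are assumptions,
+ * not part of this interface):
+ *
+ *   struct cam_vfe_bus *bus_info = NULL;
+ *
+ *   rc = cam_vfe_bus_ver2_init(soc_info, hw_intf, bus_hw_info,
+ *           vfe_irq_controller, &bus_info);
+ *   if (!rc)
+ *           rc = bus_info->hw_ops.init(bus_info->bus_priv, NULL, 0);
+ *   ...
+ *   cam_vfe_bus_ver2_deinit(&bus_info);
+ */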
+
+#endif /* _CAM_VFE_BUS_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
new file mode 100644
index 0000000..044aa91
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_BUS_H_
+#define _CAM_VFE_BUS_H_
+
+#include <uapi/media/cam_isp.h>
+#include "cam_hw_intf.h"
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_BUS_VER_1_0             0x1000
+#define CAM_VFE_BUS_VER_2_0             0x2000
+
+enum cam_vfe_bus_plane_type {
+	PLANE_Y,
+	PLANE_C,
+	PLANE_MAX,
+};
+
+/*
+ * struct cam_vfe_bus:
+ *
+ * @Brief:                   Bus interface structure
+ *
+ * @bus_priv:                Private data of BUS
+ * @hw_ops:                  Hardware interface functions
+ * @top_half_handler:        Top Half handler function
+ * @bottom_half_handler:     Bottom Half handler function
+ */
+struct cam_vfe_bus {
+	void               *bus_priv;
+
+	struct cam_hw_ops              hw_ops;
+	CAM_IRQ_HANDLER_TOP_HALF       top_half_handler;
+	CAM_IRQ_HANDLER_BOTTOM_HALF    bottom_half_handler;
+};
+
+/*
+ * cam_vfe_bus_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @bus_version:             Version of BUS to initialize
+ * @soc_info:                Soc Information for the associated HW
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller:      VFE IRQ Controller to use for subscribing to Top
+ *                           level IRQs
+ * @vfe_bus:                 Pointer to vfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_bus_init(uint32_t          bus_version,
+	struct cam_hw_soc_info        *soc_info,
+	struct cam_hw_intf            *hw_intf,
+	void                          *bus_hw_info,
+	void                          *vfe_irq_controller,
+	struct cam_vfe_bus            **vfe_bus);
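+
+/*
+ * Illustrative call (names other than the version macro are assumptions):
+ *
+ *   rc = cam_vfe_bus_init(CAM_VFE_BUS_VER_2_0, soc_info, hw_intf,
+ *           bus_hw_info, vfe_irq_controller, &vfe_bus);
+ *
+ * A version-based dispatcher is expected to route CAM_VFE_BUS_VER_2_0 to
+ * the matching versioned init, e.g. cam_vfe_bus_ver2_init().
+ */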
+
+/*
+ * cam_vfe_bus_deinit()
+ *
+ * @Brief:                   Deinitialize Bus layer
+ *
+ * @bus_version:             Version of BUS to deinitialize
+ * @vfe_bus:                 Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_bus_deinit(uint32_t        bus_version,
+	struct cam_vfe_bus           **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
new file mode 100644
index 0000000..41bacf7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/hw_utils/irq_controller
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_camif_lite_ver2.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_top.o cam_vfe_top_ver2.o cam_vfe_camif_ver2.o cam_vfe_rdi.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
new file mode 100644
index 0000000..4b95f52
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_camif_lite_ver2.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+
+struct cam_vfe_mux_camif_lite_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_vfe_camif_lite_ver2_reg          *camif_lite_reg;
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_lite_ver2_reg_data     *reg_data;
+	struct cam_hw_soc_info                      *soc_info;
+	enum cam_isp_hw_sync_mode                    sync_mode;
+};
+
+static int cam_vfe_camif_lite_get_reg_update(
+	struct cam_isp_resource_node          *camif_lite_res,
+	void                                  *cmd_args,
+	uint32_t                               arg_size)
+{
+	uint32_t                               size = 0;
+	uint32_t                               reg_val_pair[2];
+	struct cam_isp_hw_get_cmd_update      *cdm_args = cmd_args;
+	struct cam_cdm_utils_ops              *cdm_util_ops = NULL;
+	struct cam_vfe_mux_camif_lite_data    *rsrc_data = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_reg_random(1);
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, size * 4);
+		return -EINVAL;
+	}
+
+	rsrc_data = camif_lite_res->res_priv;
+	reg_val_pair[0] = rsrc_data->camif_lite_reg->reg_update_cmd;
+	reg_val_pair[1] = rsrc_data->reg_data->dual_pd_reg_update_cmd_data;
+	CAM_DBG(CAM_ISP, "CAMIF Lite reg_update_cmd %x offset %x",
+		reg_val_pair[1], reg_val_pair[0]);
+
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+		1, reg_val_pair);
+
+	cdm_args->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+int cam_vfe_camif_lite_ver2_acquire_resource(
+	struct cam_isp_resource_node          *camif_lite_res,
+	void                                  *acquire_param)
+{
+	struct cam_vfe_mux_camif_lite_data    *camif_lite_data;
+	struct cam_vfe_acquire_args           *acquire_data;
+	int rc = 0;
+
+	if (!camif_lite_res) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	camif_lite_data = (struct cam_vfe_mux_camif_lite_data *)
+		camif_lite_res->res_priv;
+	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
+
+	camif_lite_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+
+	CAM_DBG(CAM_ISP, "hw id:%d sync_mode=%d",
+		camif_lite_res->hw_intf->hw_idx,
+		camif_lite_data->sync_mode);
+	return rc;
+}
+
+static int cam_vfe_camif_lite_resource_start(
+	struct cam_isp_resource_node         *camif_lite_res)
+{
+	struct cam_vfe_mux_camif_lite_data   *rsrc_data;
+	uint32_t                              val = 0;
+
+	if (!camif_lite_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if (camif_lite_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Error! Invalid camif lite res res_state:%d",
+			camif_lite_res->res_state);
+		return -EINVAL;
+	}
+
+	rsrc_data = (struct cam_vfe_mux_camif_lite_data *)
+		camif_lite_res->res_priv;
+
+	/* vfe core config */
+	val = cam_io_r_mb(rsrc_data->mem_base +
+		rsrc_data->common_reg->core_cfg);
+
+	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
+
+	val |= (1 << rsrc_data->reg_data->dual_pd_path_sel_shift);
+
+	cam_io_w_mb(val, rsrc_data->mem_base +
+		rsrc_data->common_reg->core_cfg);
+
+	CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d",
+		camif_lite_res->hw_intf->hw_idx, val);
+
+	/* epoch config with 20 lines */
+	cam_io_w_mb(rsrc_data->reg_data->lite_epoch_line_cfg,
+		rsrc_data->mem_base +
+		rsrc_data->camif_lite_reg->lite_epoch_irq);
+
+	camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	/* Reg Update */
+	cam_io_w_mb(rsrc_data->reg_data->dual_pd_reg_update_cmd_data,
+		rsrc_data->mem_base +
+		rsrc_data->camif_lite_reg->reg_update_cmd);
+	CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d",
+		camif_lite_res->hw_intf->hw_idx,
+		rsrc_data->reg_data->dual_pd_reg_update_cmd_data);
+
+	CAM_DBG(CAM_ISP, "Start Camif Lite IFE %d Done",
+		camif_lite_res->hw_intf->hw_idx);
+	return 0;
+}
+
+static int cam_vfe_camif_lite_resource_stop(
+	struct cam_isp_resource_node             *camif_lite_res)
+{
+	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv;
+	struct cam_vfe_camif_lite_ver2_reg       *camif_lite_reg;
+	int                                       rc = 0;
+	uint32_t                                  val = 0;
+
+	if (!camif_lite_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if ((camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+		(camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
+		return 0;
+
+	camif_lite_priv = (struct cam_vfe_mux_camif_lite_data *)
+		camif_lite_res->res_priv;
+	camif_lite_reg = camif_lite_priv->camif_lite_reg;
+
+	val = cam_io_r_mb(camif_lite_priv->mem_base +
+		camif_lite_priv->common_reg->core_cfg);
+	val &= (~(1 << camif_lite_priv->reg_data->dual_pd_path_sel_shift));
+	cam_io_w_mb(val, camif_lite_priv->mem_base +
+		camif_lite_priv->common_reg->core_cfg);
+
+	if (camif_lite_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		camif_lite_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return rc;
+}
+
+static int cam_vfe_camif_lite_process_cmd(
+	struct cam_isp_resource_node *rsrc_node,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!rsrc_node || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_camif_lite_get_reg_update(rsrc_node, cmd_args,
+			arg_size);
+		break;
+	default:
+		CAM_ERR(CAM_ISP,
+			"unsupported process command:%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+static int cam_vfe_camif_lite_handle_irq_bottom_half(
+	void                                    *handler_priv,
+	void                                    *evt_payload_priv)
+{
+	int                                      ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node            *camif_lite_node;
+	struct cam_vfe_mux_camif_lite_data      *camif_lite_priv;
+	struct cam_vfe_top_irq_evt_payload      *payload;
+	uint32_t                                 irq_status0;
+	uint32_t                                 irq_status1;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP, "Invalid params");
+		return ret;
+	}
+
+	camif_lite_node = handler_priv;
+	camif_lite_priv = camif_lite_node->res_priv;
+	payload         = evt_payload_priv;
+	irq_status0     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+	irq_status1     = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+
+	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status0 &
+			camif_lite_priv->reg_data->lite_sof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received SOF");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		if (irq_status0 &
+			camif_lite_priv->reg_data->lite_epoch0_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EPOCH");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		if (irq_status0 &
+			camif_lite_priv->reg_data->dual_pd_reg_upd_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		if (irq_status0 &
+			camif_lite_priv->reg_data->lite_eof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EOF\n");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_ERROR:
+		if (irq_status1 &
+			camif_lite_priv->reg_data->lite_error_irq_mask1) {
+			CAM_DBG(CAM_ISP, "Received ERROR\n");
+			ret = CAM_ISP_HW_ERROR_OVERFLOW;
+		} else {
+			ret = CAM_ISP_HW_ERROR_NONE;
+		}
+		break;
+	default:
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "returning status = %d", ret);
+	return ret;
+}
+
+int cam_vfe_camif_lite_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_lite_hw_info,
+	struct cam_isp_resource_node  *camif_lite_node)
+{
+	struct cam_vfe_mux_camif_lite_data       *camif_lite_priv = NULL;
+	struct cam_vfe_camif_lite_ver2_hw_info   *camif_lite_info =
+		camif_lite_hw_info;
+
+	camif_lite_priv = kzalloc(sizeof(*camif_lite_priv),
+		GFP_KERNEL);
+	if (!camif_lite_priv)
+		return -ENOMEM;
+
+	camif_lite_node->res_priv = camif_lite_priv;
+
+	camif_lite_priv->mem_base         =
+		soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	camif_lite_priv->camif_lite_reg   = camif_lite_info->camif_lite_reg;
+	camif_lite_priv->common_reg       = camif_lite_info->common_reg;
+	camif_lite_priv->reg_data         = camif_lite_info->reg_data;
+	camif_lite_priv->hw_intf          = hw_intf;
+	camif_lite_priv->soc_info         = soc_info;
+
+	camif_lite_node->init    = NULL;
+	camif_lite_node->deinit  = NULL;
+	camif_lite_node->start   = cam_vfe_camif_lite_resource_start;
+	camif_lite_node->stop    = cam_vfe_camif_lite_resource_stop;
+	camif_lite_node->process_cmd = cam_vfe_camif_lite_process_cmd;
+	camif_lite_node->top_half_handler =
+		cam_vfe_camif_lite_handle_irq_top_half;
+	camif_lite_node->bottom_half_handler =
+		cam_vfe_camif_lite_handle_irq_bottom_half;
+
+	return 0;
+}
+
+int cam_vfe_camif_lite_ver2_deinit(
+	struct cam_isp_resource_node  *camif_lite_node)
+{
+	struct cam_vfe_mux_camif_lite_data *camif_lite_priv =
+		camif_lite_node->res_priv;
+
+	camif_lite_node->start = NULL;
+	camif_lite_node->stop  = NULL;
+	camif_lite_node->process_cmd = NULL;
+	camif_lite_node->top_half_handler = NULL;
+	camif_lite_node->bottom_half_handler = NULL;
+
+	camif_lite_node->res_priv = NULL;
+
+	if (!camif_lite_priv) {
+		CAM_ERR(CAM_ISP, "Error! camif_priv is NULL");
+		return -ENODEV;
+	}
+
+	kfree(camif_lite_priv);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
new file mode 100644
index 0000000..37127d33
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver2.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_CAMIF_LITE_VER2_H_
+#define _CAM_VFE_CAMIF_LITE_VER2_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+struct cam_vfe_camif_lite_ver2_reg {
+	uint32_t     camif_lite_cmd;
+	uint32_t     camif_lite_config;
+	uint32_t     lite_skip_period;
+	uint32_t     lite_irq_subsample_pattern;
+	uint32_t     lite_epoch_irq;
+	uint32_t     reg_update_cmd;
+};
+
+struct cam_vfe_camif_lite_ver2_reg_data {
+	uint32_t     dual_pd_reg_update_cmd_data;
+	uint32_t     lite_epoch_line_cfg;
+	uint32_t     lite_sof_irq_mask;
+	uint32_t     lite_epoch0_irq_mask;
+	uint32_t     dual_pd_reg_upd_irq_mask;
+	uint32_t     lite_eof_irq_mask;
+	uint32_t     lite_error_irq_mask0;
+	uint32_t     lite_error_irq_mask1;
+	uint32_t     extern_reg_update_shift;
+	uint32_t     dual_pd_path_sel_shift;
+};
+
+struct cam_vfe_camif_lite_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_lite_ver2_reg          *camif_lite_reg;
+	struct cam_vfe_camif_lite_ver2_reg_data     *reg_data;
+};
+
+int cam_vfe_camif_lite_ver2_acquire_resource(
+	struct cam_isp_resource_node          *camif_lite_res,
+	void                                  *acquire_param);
+
+int cam_vfe_camif_lite_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_lite_hw_info,
+	struct cam_isp_resource_node  *camif_lite_node);
+
+int cam_vfe_camif_lite_ver2_deinit(
+	struct cam_isp_resource_node  *camif_lite_node);
+
+#endif /* _CAM_VFE_CAMIF_LITE_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
new file mode 100644
index 0000000..8300722
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_soc.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_vfe_camif_ver2.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "cam_cpas_api.h"
+
+#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2
+
+struct cam_vfe_mux_camif_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_vfe_camif_ver2_reg               *camif_reg;
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_reg_data               *reg_data;
+	struct cam_hw_soc_info                      *soc_info;
+
+	enum cam_isp_hw_sync_mode          sync_mode;
+	uint32_t                           dsp_mode;
+	uint32_t                           pix_pattern;
+	uint32_t                           first_pixel;
+	uint32_t                           first_line;
+	uint32_t                           last_pixel;
+	uint32_t                           last_line;
+	bool                               enable_sof_irq_debug;
+	uint32_t                           irq_debug_cnt;
+	uint32_t                           camif_debug;
+};
+
+static int cam_vfe_camif_validate_pix_pattern(uint32_t pattern)
+{
+	int rc;
+
+	switch (pattern) {
+	case CAM_ISP_PATTERN_BAYER_RGRGRG:
+	case CAM_ISP_PATTERN_BAYER_GRGRGR:
+	case CAM_ISP_PATTERN_BAYER_BGBGBG:
+	case CAM_ISP_PATTERN_BAYER_GBGBGB:
+	case CAM_ISP_PATTERN_YUV_YCBYCR:
+	case CAM_ISP_PATTERN_YUV_YCRYCB:
+	case CAM_ISP_PATTERN_YUV_CBYCRY:
+	case CAM_ISP_PATTERN_YUV_CRYCBY:
+		rc = 0;
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Error! Invalid pix pattern:%d", pattern);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int cam_vfe_camif_get_reg_update(
+	struct cam_isp_resource_node  *camif_res,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          reg_val_pair[2];
+	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+	struct cam_vfe_mux_camif_data    *rsrc_data = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_reg_random(1);
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, size * 4);
+		return -EINVAL;
+	}
+
+	rsrc_data = camif_res->res_priv;
+	reg_val_pair[0] = rsrc_data->camif_reg->reg_update_cmd;
+	reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data;
+	CAM_DBG(CAM_ISP, "CAMIF reg_update_cmd %x offset %x",
+		reg_val_pair[1], reg_val_pair[0]);
+
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+		1, reg_val_pair);
+
+	cdm_args->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+int cam_vfe_camif_ver2_acquire_resource(
+	struct cam_isp_resource_node  *camif_res,
+	void                          *acquire_param)
+{
+	struct cam_vfe_mux_camif_data    *camif_data;
+	struct cam_vfe_acquire_args      *acquire_data;
+
+	int rc = 0;
+
+	camif_data   = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+	acquire_data = (struct cam_vfe_acquire_args   *)acquire_param;
+
+	rc = cam_vfe_camif_validate_pix_pattern(
+			acquire_data->vfe_in.in_port->test_pattern);
+	if (rc)
+		return rc;
+
+	camif_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+	camif_data->pix_pattern = acquire_data->vfe_in.in_port->test_pattern;
+	camif_data->dsp_mode    = acquire_data->vfe_in.in_port->dsp_mode;
+	camif_data->first_pixel = acquire_data->vfe_in.in_port->left_start;
+	camif_data->last_pixel  = acquire_data->vfe_in.in_port->left_stop;
+	camif_data->first_line  = acquire_data->vfe_in.in_port->line_start;
+	camif_data->last_line   = acquire_data->vfe_in.in_port->line_stop;
+
+	CAM_DBG(CAM_ISP, "hw id:%d pix_pattern:%d dsp_mode=%d",
+		camif_res->hw_intf->hw_idx,
+		camif_data->pix_pattern, camif_data->dsp_mode);
+	return rc;
+}
+
+static int cam_vfe_camif_resource_init(
+	struct cam_isp_resource_node        *camif_res,
+	void *init_args, uint32_t arg_size)
+{
+	struct cam_vfe_mux_camif_data    *camif_data;
+	struct cam_hw_soc_info           *soc_info;
+	int rc = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	camif_data   = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+	soc_info = camif_data->soc_info;
+
+	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		rc = cam_vfe_soc_enable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP, "failed to enable dsp clk");
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_resource_deinit(
+	struct cam_isp_resource_node        *camif_res,
+	void *init_args, uint32_t arg_size)
+{
+	struct cam_vfe_mux_camif_data    *camif_data;
+	struct cam_hw_soc_info           *soc_info;
+	int rc = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error Invalid input arguments");
+		return -EINVAL;
+	}
+
+	camif_data   = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+
+	soc_info = camif_data->soc_info;
+
+	if ((camif_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		rc = cam_vfe_soc_disable_clk(soc_info, CAM_VFE_DSP_CLK_NAME);
+		if (rc)
+			CAM_ERR(CAM_ISP, "failed to disable dsp clk");
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_resource_start(
+	struct cam_isp_resource_node        *camif_res)
+{
+	struct cam_vfe_mux_camif_data       *rsrc_data;
+	uint32_t                             val = 0;
+	uint32_t                             epoch0_irq_mask;
+	uint32_t                             epoch1_irq_mask;
+	uint32_t                             computed_epoch_line_cfg;
+	uint32_t                             camera_hw_version = 0;
+	int                                  rc = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if (camif_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Error! Invalid camif res res_state:%d",
+			camif_res->res_state);
+		return -EINVAL;
+	}
+
+	rsrc_data = (struct cam_vfe_mux_camif_data  *)camif_res->res_priv;
+
+	/* config vfe core */
+	val = (rsrc_data->pix_pattern <<
+			rsrc_data->reg_data->pixel_pattern_shift);
+	if (rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
+		val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
+
+	if ((rsrc_data->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(rsrc_data->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		/* DSP mode reg val is CAM_ISP_DSP_MODE - 1 */
+		val |= (((rsrc_data->dsp_mode - 1) &
+			rsrc_data->reg_data->dsp_mode_mask) <<
+			rsrc_data->reg_data->dsp_mode_shift);
+		val |= (0x1 << rsrc_data->reg_data->dsp_en_shift);
+	}
+
+	cam_io_w_mb(val, rsrc_data->mem_base + rsrc_data->common_reg->core_cfg);
+
+	CAM_DBG(CAM_ISP, "hw id:%d core_cfg val:%d", camif_res->hw_intf->hw_idx,
+		val);
+
+	/* disable the CGC for stats */
+	cam_io_w_mb(0xFFFFFFFF, rsrc_data->mem_base +
+		rsrc_data->common_reg->module_ctrl[
+		CAM_VFE_TOP_VER2_MODULE_STATS]->cgc_ovd);
+
+	/* get the HW version */
+	rc = cam_cpas_get_cpas_hw_version(&camera_hw_version);
+
+	if (rc) {
+		CAM_ERR(CAM_ISP, "Couldn't find HW version. rc: %d", rc);
+		return rc;
+	}
+
+	/* epoch config */
+	switch (camera_hw_version) {
+	case CAM_CPAS_TITAN_175_V101:
+	case CAM_CPAS_TITAN_175_V100:
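+		/*
+		 * On Titan 175 the EPOCH0 IRQ line is set to the vertical
+		 * midpoint of the frame, while EPOCH1 keeps the default
+		 * line from the reg data.
+		 */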
+		epoch0_irq_mask = ((rsrc_data->last_line -
+				rsrc_data->first_line) / 2) +
+				rsrc_data->first_line;
+		epoch1_irq_mask = rsrc_data->reg_data->epoch_line_cfg &
+				0xFFFF;
+		computed_epoch_line_cfg = (epoch0_irq_mask << 16) |
+				epoch1_irq_mask;
+		cam_io_w_mb(computed_epoch_line_cfg,
+				rsrc_data->mem_base +
+				rsrc_data->camif_reg->epoch_irq);
+		CAM_DBG(CAM_ISP, "first_line: %u\n"
+				"last_line: %u\n"
+				"epoch_line_cfg: 0x%x",
+				rsrc_data->first_line,
+				rsrc_data->last_line,
+				computed_epoch_line_cfg);
+		break;
+	case CAM_CPAS_TITAN_170_V100:
+	case CAM_CPAS_TITAN_170_V110:
+	case CAM_CPAS_TITAN_170_V120:
+		cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+				rsrc_data->mem_base +
+				rsrc_data->camif_reg->epoch_irq);
+		break;
+	default:
+		cam_io_w_mb(rsrc_data->reg_data->epoch_line_cfg,
+				rsrc_data->mem_base +
+				rsrc_data->camif_reg->epoch_irq);
+		CAM_WARN(CAM_ISP, "Unknown hardware version: 0x%x",
+				camera_hw_version);
+		break;
+	}
+
+	camif_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	/* Reg Update */
+	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
+		rsrc_data->mem_base + rsrc_data->camif_reg->reg_update_cmd);
+	CAM_DBG(CAM_ISP, "hw id:%d RUP val:%d", camif_res->hw_intf->hw_idx,
+		rsrc_data->reg_data->reg_update_cmd_data);
+
+	/* disable sof irq debug flag */
+	rsrc_data->enable_sof_irq_debug = false;
+	rsrc_data->irq_debug_cnt = 0;
+
+	if (rsrc_data->camif_debug &
+		CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+		val = cam_io_r_mb(rsrc_data->mem_base +
+			rsrc_data->camif_reg->vfe_diag_config);
+		val |= rsrc_data->reg_data->enable_diagnostic_hw;
+		cam_io_w_mb(val, rsrc_data->mem_base +
+			rsrc_data->camif_reg->vfe_diag_config);
+	}
+
+	CAM_DBG(CAM_ISP, "Start Camif IFE %d Done", camif_res->hw_intf->hw_idx);
+	return 0;
+}
+
+static int cam_vfe_camif_reg_dump(
+	struct cam_isp_resource_node *camif_res)
+{
+	struct cam_vfe_mux_camif_data *camif_priv;
+	struct cam_vfe_soc_private *soc_private;
+	int rc = 0, i;
+	uint32_t val = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if ((camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) ||
+		(camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE))
+		return 0;
+
+	camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+	soc_private = camif_priv->soc_info->soc_private;
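+	/* Dump hardware-specific CAMIF/bus register ranges for debug */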
+	for (i = 0xA3C; i <= 0xA90; i += 4) {
+		val = cam_io_r_mb(camif_priv->mem_base + i);
+		CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+	}
+
+	for (i = 0xE0C; i <= 0xE3C; i += 4) {
+		val = cam_io_r_mb(camif_priv->mem_base + i);
+		CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+	}
+
+	for (i = 0x2000; i <= 0x20B8; i += 4) {
+		val = cam_io_r_mb(camif_priv->mem_base + i);
+		CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+	}
+
+	for (i = 0x2500; i <= 0x255C; i += 4) {
+		val = cam_io_r_mb(camif_priv->mem_base + i);
+		CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+	}
+
+	for (i = 0x2600; i <= 0x265C; i += 4) {
+		val = cam_io_r_mb(camif_priv->mem_base + i);
+		CAM_INFO(CAM_ISP, "offset 0x%x val 0x%x", i, val);
+	}
+
+	cam_cpas_reg_read(soc_private->cpas_handle,
+		CAM_CPAS_REG_CAMNOC, 0x420, true, &val);
+	CAM_INFO(CAM_ISP, "IFE02_MAXWR_LOW offset 0x420 val 0x%x", val);
+
+	cam_cpas_reg_read(soc_private->cpas_handle,
+		CAM_CPAS_REG_CAMNOC, 0x820, true, &val);
+	CAM_INFO(CAM_ISP, "IFE13_MAXWR_LOW offset 0x820 val 0x%x", val);
+
+	return rc;
+}
+
+static int cam_vfe_camif_reg_dump_bh(struct cam_vfe_mux_camif_data *camif_priv)
+{
+	uint32_t offset, val, wm_idx;
+
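+	/* Dump the VFE top block, bus WM registers and CAMNOC values */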
+	for (offset = 0x0; offset < 0x1000; offset += 0x4) {
+		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+		CAM_INFO(CAM_ISP, "offset 0x%x value 0x%x", offset, val);
+	}
+
+	for (offset = 0x2000; offset <= 0x20B8; offset += 0x4) {
+		val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+		CAM_INFO(CAM_ISP, "offset 0x%x value 0x%x", offset, val);
+	}
+
+	for (wm_idx = 0; wm_idx <= 23; wm_idx++) {
+		for (offset = 0x2200 + 0x100 * wm_idx;
+			offset < 0x2278 + 0x100 * wm_idx; offset += 0x4) {
+			val = cam_soc_util_r(camif_priv->soc_info, 0, offset);
+			CAM_INFO(CAM_ISP,
+				"offset 0x%x value 0x%x", offset, val);
+		}
+	}
+
+	offset = 0x420;
+	val = cam_soc_util_r(camif_priv->soc_info, 1, offset);
+	CAM_INFO(CAM_ISP, "CAMNOC IFE02 MaxWR_LOW offset 0x%x value 0x%x",
+		offset, val);
+
+	offset = 0x820;
+	val = cam_soc_util_r(camif_priv->soc_info, 1, offset);
+	CAM_INFO(CAM_ISP, "CAMNOC IFE13 MaxWR_LOW offset 0x%x value 0x%x",
+		offset, val);
+
+	return 0;
+}
+
+static int cam_vfe_camif_resource_stop(
+	struct cam_isp_resource_node        *camif_res)
+{
+	struct cam_vfe_mux_camif_data       *camif_priv;
+	struct cam_vfe_camif_ver2_reg       *camif_reg;
+	int rc = 0;
+	uint32_t val = 0;
+
+	if (!camif_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED ||
+		camif_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)
+		return 0;
+
+	camif_priv = (struct cam_vfe_mux_camif_data *)camif_res->res_priv;
+	camif_reg = camif_priv->camif_reg;
+
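+	/* If the DSP path was enabled, clear the DSP enable bit in core_cfg */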
+	if ((camif_priv->dsp_mode >= CAM_ISP_DSP_MODE_ONE_WAY) &&
+		(camif_priv->dsp_mode <= CAM_ISP_DSP_MODE_ROUND)) {
+		val = cam_io_r_mb(camif_priv->mem_base +
+				camif_priv->common_reg->core_cfg);
+		val &= (~(1 << camif_priv->reg_data->dsp_en_shift));
+		cam_io_w_mb(val, camif_priv->mem_base +
+			camif_priv->common_reg->core_cfg);
+	}
+
+	if (camif_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		camif_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	val = cam_io_r_mb(camif_priv->mem_base +
+			camif_priv->camif_reg->vfe_diag_config);
+	if (val & camif_priv->reg_data->enable_diagnostic_hw) {
+		val &= ~camif_priv->reg_data->enable_diagnostic_hw;
+		cam_io_w_mb(val, camif_priv->mem_base +
+			camif_priv->camif_reg->vfe_diag_config);
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_sof_irq_debug(
+	struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+	struct cam_vfe_mux_camif_data *camif_priv;
+	uint32_t *enable_sof_irq = (uint32_t *)cmd_args;
+
+	camif_priv =
+		(struct cam_vfe_mux_camif_data *)rsrc_node->res_priv;
+
+	if (*enable_sof_irq == 1)
+		camif_priv->enable_sof_irq_debug = true;
+	else
+		camif_priv->enable_sof_irq_debug = false;
+
+	return 0;
+}
+
+static int cam_vfe_camif_process_cmd(struct cam_isp_resource_node *rsrc_node,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+	struct cam_vfe_mux_camif_data *camif_priv = NULL;
+
+	if (!rsrc_node || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_camif_get_reg_update(rsrc_node, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_REG_DUMP:
+		rc = cam_vfe_camif_reg_dump(rsrc_node);
+		break;
+	case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
+		rc = cam_vfe_camif_sof_irq_debug(rsrc_node, cmd_args);
+		break;
+	case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
+		camif_priv =
+			(struct cam_vfe_mux_camif_data *)rsrc_node->res_priv;
+		camif_priv->camif_debug = *((uint32_t *)cmd_args);
+		break;
+	default:
+		CAM_ERR(CAM_ISP,
+			"unsupported process command:%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_vfe_camif_handle_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+static int cam_vfe_camif_handle_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int                                   ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node         *camif_node;
+	struct cam_vfe_mux_camif_data        *camif_priv;
+	struct cam_vfe_top_irq_evt_payload   *payload;
+	uint32_t                              irq_status0;
+	uint32_t                              irq_status1;
+	uint32_t                              val;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP, "Invalid params");
+		return ret;
+	}
+
+	camif_node = handler_priv;
+	camif_priv = camif_node->res_priv;
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+	irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
+
+	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status0 & camif_priv->reg_data->sof_irq_mask) {
+			if ((camif_priv->enable_sof_irq_debug) &&
+				(camif_priv->irq_debug_cnt <=
+				CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
+				CAM_INFO_RATE_LIMIT(CAM_ISP, "Received SOF");
+
+				camif_priv->irq_debug_cnt++;
+				if (camif_priv->irq_debug_cnt ==
+					CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX) {
+					camif_priv->enable_sof_irq_debug =
+						false;
+					camif_priv->irq_debug_cnt = 0;
+				}
+			} else {
+				CAM_DBG(CAM_ISP, "Received SOF");
+			}
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EPOCH:
+		if (irq_status0 & camif_priv->reg_data->epoch0_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EPOCH");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		if (irq_status0 & camif_priv->reg_data->reg_update_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received REG_UPDATE_ACK");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_EOF:
+		if (irq_status0 & camif_priv->reg_data->eof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received EOF\n");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_ERROR:
+		if (irq_status1 & camif_priv->reg_data->error_irq_mask1) {
+			CAM_DBG(CAM_ISP, "Received ERROR\n");
+			ret = CAM_ISP_HW_ERROR_OVERFLOW;
+			cam_vfe_camif_reg_dump_bh(camif_node->res_priv);
+		} else {
+			ret = CAM_ISP_HW_ERROR_NONE;
+		}
+
+		if (camif_priv->camif_debug &
+			CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS) {
+			val = cam_io_r(camif_priv->mem_base +
+				camif_priv->camif_reg->vfe_diag_sensor_status);
+			CAM_DBG(CAM_ISP, "VFE_DIAG_SENSOR_STATUS: 0x%x",
+				camif_priv->mem_base, val);
+		}
+		break;
+	default:
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "returing status = %d", ret);
+	return ret;
+}
+
+int cam_vfe_camif_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_hw_info,
+	struct cam_isp_resource_node  *camif_node)
+{
+	struct cam_vfe_mux_camif_data     *camif_priv = NULL;
+	struct cam_vfe_camif_ver2_hw_info *camif_info = camif_hw_info;
+
+	camif_priv = kzalloc(sizeof(struct cam_vfe_mux_camif_data),
+		GFP_KERNEL);
+	if (!camif_priv) {
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for camif_priv");
+		return -ENOMEM;
+	}
+
+	camif_node->res_priv = camif_priv;
+
+	camif_priv->mem_base    = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	camif_priv->camif_reg   = camif_info->camif_reg;
+	camif_priv->common_reg  = camif_info->common_reg;
+	camif_priv->reg_data    = camif_info->reg_data;
+	camif_priv->hw_intf     = hw_intf;
+	camif_priv->soc_info    = soc_info;
+
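+	/* Plug the CAMIF ops into the generic ISP resource node */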
+	camif_node->init    = cam_vfe_camif_resource_init;
+	camif_node->deinit  = cam_vfe_camif_resource_deinit;
+	camif_node->start   = cam_vfe_camif_resource_start;
+	camif_node->stop    = cam_vfe_camif_resource_stop;
+	camif_node->process_cmd = cam_vfe_camif_process_cmd;
+	camif_node->top_half_handler = cam_vfe_camif_handle_irq_top_half;
+	camif_node->bottom_half_handler = cam_vfe_camif_handle_irq_bottom_half;
+
+	return 0;
+}
+
+int cam_vfe_camif_ver2_deinit(
+	struct cam_isp_resource_node  *camif_node)
+{
+	struct cam_vfe_mux_camif_data *camif_priv = camif_node->res_priv;
+
+	camif_node->start = NULL;
+	camif_node->stop  = NULL;
+	camif_node->process_cmd = NULL;
+	camif_node->top_half_handler = NULL;
+	camif_node->bottom_half_handler = NULL;
+
+	camif_node->res_priv = NULL;
+
+	if (!camif_priv) {
+		CAM_ERR(CAM_ISP, "Error! camif_priv is NULL");
+		return -ENODEV;
+	}
+
+	kfree(camif_priv);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
new file mode 100644
index 0000000..01f29f1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver2.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_CAMIF_VER2_H_
+#define _CAM_VFE_CAMIF_VER2_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+/*
+ * Debug values for camif module
+ */
+#define CAMIF_DEBUG_ENABLE_SENSOR_DIAG_STATUS      BIT(0)
+
+struct cam_vfe_camif_ver2_reg {
+	uint32_t     camif_cmd;
+	uint32_t     camif_config;
+	uint32_t     line_skip_pattern;
+	uint32_t     pixel_skip_pattern;
+	uint32_t     skip_period;
+	uint32_t     irq_subsample_pattern;
+	uint32_t     epoch_irq;
+	uint32_t     raw_crop_width_cfg;
+	uint32_t     raw_crop_height_cfg;
+	uint32_t     reg_update_cmd;
+	uint32_t     vfe_diag_config;
+	uint32_t     vfe_diag_sensor_status;
+};
+
+struct cam_vfe_camif_reg_data {
+	uint32_t     raw_crop_first_pixel_shift;
+	uint32_t     raw_crop_first_pixel_mask;
+
+	uint32_t     raw_crop_last_pixel_shift;
+	uint32_t     raw_crop_last_pixel_mask;
+
+	uint32_t     raw_crop_first_line_shift;
+	uint32_t     raw_crop_first_line_mask;
+
+	uint32_t     raw_crop_last_line_shift;
+	uint32_t     raw_crop_last_line_mask;
+
+	uint32_t     input_mux_sel_shift;
+	uint32_t     input_mux_sel_mask;
+	uint32_t     extern_reg_update_shift;
+	uint32_t     extern_reg_update_mask;
+
+	uint32_t     pixel_pattern_shift;
+	uint32_t     pixel_pattern_mask;
+
+	uint32_t     dsp_mode_shift;
+	uint32_t     dsp_mode_mask;
+	uint32_t     dsp_en_shift;
+	uint32_t     dsp_en_mask;
+
+	uint32_t     reg_update_cmd_data;
+	uint32_t     epoch_line_cfg;
+	uint32_t     sof_irq_mask;
+	uint32_t     epoch0_irq_mask;
+	uint32_t     reg_update_irq_mask;
+	uint32_t     eof_irq_mask;
+	uint32_t     error_irq_mask0;
+	uint32_t     error_irq_mask1;
+
+	uint32_t     enable_diagnostic_hw;
+};
+
+struct cam_vfe_camif_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_camif_ver2_reg               *camif_reg;
+	struct cam_vfe_camif_reg_data               *reg_data;
+};
+
+int cam_vfe_camif_ver2_acquire_resource(
+	struct cam_isp_resource_node  *camif_res,
+	void                          *acquire_param);
+
+int cam_vfe_camif_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *camif_hw_info,
+	struct cam_isp_resource_node  *camif_node);
+
+int cam_vfe_camif_ver2_deinit(
+	struct cam_isp_resource_node  *camif_node);
+
+#endif /* _CAM_VFE_CAMIF_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
new file mode 100644
index 0000000..c0a35fe
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "cam_vfe_rdi.h"
+#include "cam_isp_hw_mgr_intf.h"
+#include "cam_isp_hw.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+
+struct cam_vfe_mux_rdi_data {
+	void __iomem                                *mem_base;
+	struct cam_hw_intf                          *hw_intf;
+	struct cam_vfe_top_ver2_reg_offset_common   *common_reg;
+	struct cam_vfe_rdi_ver2_reg                 *rdi_reg;
+	struct cam_vfe_rdi_reg_data                 *reg_data;
+
+	enum cam_isp_hw_sync_mode          sync_mode;
+};
+
+static int cam_vfe_rdi_get_reg_update(
+	struct cam_isp_resource_node  *rdi_res,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          reg_val_pair[2];
+	struct cam_isp_hw_get_cmd_update *cdm_args = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+	struct cam_vfe_mux_rdi_data      *rsrc_data = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Error - Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res) {
+		CAM_ERR(CAM_ISP, "Error - Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops = (struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Error - Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_reg_random(1);
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP,
+			"Error - buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, size * 4);
+		return -EINVAL;
+	}
+
+	rsrc_data = rdi_res->res_priv;
+	reg_val_pair[0] = rsrc_data->rdi_reg->reg_update_cmd;
+	reg_val_pair[1] = rsrc_data->reg_data->reg_update_cmd_data;
+	CAM_DBG(CAM_ISP, "RDI%d reg_update_cmd %x",
+		rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0, reg_val_pair[1]);
+
+	cdm_util_ops->cdm_write_regrandom(cdm_args->cmd.cmd_buf_addr,
+		1, reg_val_pair);
+	cdm_args->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+int cam_vfe_rdi_ver2_acquire_resource(
+	struct cam_isp_resource_node  *rdi_res,
+	void                          *acquire_param)
+{
+	struct cam_vfe_mux_rdi_data   *rdi_data;
+	struct cam_vfe_acquire_args   *acquire_data;
+
+	rdi_data     = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv;
+	acquire_data = (struct cam_vfe_acquire_args *)acquire_param;
+
+	rdi_data->sync_mode   = acquire_data->vfe_in.sync_mode;
+
+	return 0;
+}
+
+static int cam_vfe_rdi_resource_start(
+	struct cam_isp_resource_node  *rdi_res)
+{
+	struct cam_vfe_mux_rdi_data   *rsrc_data;
+	int                            rc = 0;
+
+	if (!rdi_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if (rdi_res->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Error! Invalid rdi res res_state:%d",
+			rdi_res->res_state);
+		return -EINVAL;
+	}
+
+	rsrc_data = (struct cam_vfe_mux_rdi_data  *)rdi_res->res_priv;
+	rdi_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	/* Reg Update */
+	cam_io_w_mb(rsrc_data->reg_data->reg_update_cmd_data,
+		rsrc_data->mem_base + rsrc_data->rdi_reg->reg_update_cmd);
+
+	CAM_DBG(CAM_ISP, "Start RDI %d",
+		rdi_res->res_id - CAM_ISP_HW_VFE_IN_RDI0);
+
+	return rc;
+}
+
+static int cam_vfe_rdi_resource_stop(
+	struct cam_isp_resource_node        *rdi_res)
+{
+	struct cam_vfe_mux_rdi_data           *rdi_priv;
+	int rc = 0;
+
+	if (!rdi_res) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_RESERVED ||
+		rdi_res->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE)
+		return 0;
+
+	rdi_priv = (struct cam_vfe_mux_rdi_data *)rdi_res->res_priv;
+
+	if (rdi_res->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		rdi_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return rc;
+}
+
+static int cam_vfe_rdi_process_cmd(struct cam_isp_resource_node *rsrc_node,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+
+	if (!rsrc_node || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_rdi_get_reg_update(rsrc_node, cmd_args,
+			arg_size);
+		break;
+	default:
+		CAM_ERR(CAM_ISP,
+			"unsupported RDI process command:%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_vfe_rdi_handle_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+static int cam_vfe_rdi_handle_irq_bottom_half(void *handler_priv,
+	void *evt_payload_priv)
+{
+	int                                  ret = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node        *rdi_node;
+	struct cam_vfe_mux_rdi_data         *rdi_priv;
+	struct cam_vfe_top_irq_evt_payload  *payload;
+	uint32_t                             irq_status0;
+
+	if (!handler_priv || !evt_payload_priv) {
+		CAM_ERR(CAM_ISP, "Invalid params");
+		return ret;
+	}
+
+	rdi_node = handler_priv;
+	rdi_priv = rdi_node->res_priv;
+	payload = evt_payload_priv;
+	irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
+
+	CAM_DBG(CAM_ISP, "event ID:%d", payload->evt_id);
+	CAM_DBG(CAM_ISP, "irq_status_0 = %x", irq_status0);
+
+	switch (payload->evt_id) {
+	case CAM_ISP_HW_EVENT_SOF:
+		if (irq_status0 & rdi_priv->reg_data->sof_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received SOF");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	case CAM_ISP_HW_EVENT_REG_UPDATE:
+		if (irq_status0 & rdi_priv->reg_data->reg_update_irq_mask) {
+			CAM_DBG(CAM_ISP, "Received REG UPDATE");
+			ret = CAM_VFE_IRQ_STATUS_SUCCESS;
+		}
+		break;
+	default:
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "returing status = %d", ret);
+	return ret;
+}
+
+int cam_vfe_rdi_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *rdi_hw_info,
+	struct cam_isp_resource_node  *rdi_node)
+{
+	struct cam_vfe_mux_rdi_data     *rdi_priv = NULL;
+	struct cam_vfe_rdi_ver2_hw_info *rdi_info = rdi_hw_info;
+
+	rdi_priv = kzalloc(sizeof(struct cam_vfe_mux_rdi_data),
+			GFP_KERNEL);
+	if (!rdi_priv) {
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for rdi_priv");
+		return -ENOMEM;
+	}
+
+	rdi_node->res_priv = rdi_priv;
+
+	rdi_priv->mem_base   = soc_info->reg_map[VFE_CORE_BASE_IDX].mem_base;
+	rdi_priv->hw_intf    = hw_intf;
+	rdi_priv->common_reg = rdi_info->common_reg;
+	rdi_priv->rdi_reg    = rdi_info->rdi_reg;
+
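+	/* Pick the per-instance reg data based on the RDI resource id */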
+	switch (rdi_node->res_id) {
+	case CAM_ISP_HW_VFE_IN_RDI0:
+		rdi_priv->reg_data = rdi_info->reg_data[0];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI1:
+		rdi_priv->reg_data = rdi_info->reg_data[1];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI2:
+		rdi_priv->reg_data = rdi_info->reg_data[2];
+		break;
+	case CAM_ISP_HW_VFE_IN_RDI3:
+		if (rdi_info->reg_data[3]) {
+			rdi_priv->reg_data = rdi_info->reg_data[3];
+		} else {
+			CAM_ERR(CAM_ISP, "Error! RDI3 is not supported");
+			goto err_init;
+		}
+		break;
+	default:
+		CAM_DBG(CAM_ISP, "invalid Resource id:%d", rdi_node->res_id);
+		goto err_init;
+	}
+
+	rdi_node->start = cam_vfe_rdi_resource_start;
+	rdi_node->stop  = cam_vfe_rdi_resource_stop;
+	rdi_node->process_cmd = cam_vfe_rdi_process_cmd;
+	rdi_node->top_half_handler = cam_vfe_rdi_handle_irq_top_half;
+	rdi_node->bottom_half_handler = cam_vfe_rdi_handle_irq_bottom_half;
+
+	return 0;
+err_init:
+	kfree(rdi_priv);
+	return -EINVAL;
+}
+
+int cam_vfe_rdi_ver2_deinit(
+	struct cam_isp_resource_node  *rdi_node)
+{
+	struct cam_vfe_mux_rdi_data *rdi_priv = rdi_node->res_priv;
+
+	rdi_node->start = NULL;
+	rdi_node->stop  = NULL;
+	rdi_node->process_cmd = NULL;
+	rdi_node->top_half_handler = NULL;
+	rdi_node->bottom_half_handler = NULL;
+
+	rdi_node->res_priv = NULL;
+
+	if (!rdi_priv) {
+		CAM_ERR(CAM_ISP, "Error! rdi_priv NULL");
+		return -ENODEV;
+	}
+	kfree(rdi_priv);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
new file mode 100644
index 0000000..300acca
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_rdi.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_RDI_H_
+#define _CAM_VFE_RDI_H_
+
+#include "cam_isp_hw.h"
+#include "cam_vfe_top.h"
+
+#define CAM_VFE_RDI_VER2_MAX  4
+
+struct cam_vfe_rdi_ver2_reg {
+	uint32_t     reg_update_cmd;
+};
+
+struct cam_vfe_rdi_reg_data {
+	uint32_t     reg_update_cmd_data;
+	uint32_t     sof_irq_mask;
+	uint32_t     reg_update_irq_mask;
+};
+
+struct cam_vfe_rdi_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
+	struct cam_vfe_rdi_ver2_reg                *rdi_reg;
+	struct cam_vfe_rdi_reg_data  *reg_data[CAM_VFE_RDI_VER2_MAX];
+};
+
+int cam_vfe_rdi_ver2_acquire_resource(
+	struct cam_isp_resource_node  *rdi_res,
+	void                          *acquire_param);
+
+int cam_vfe_rdi_ver2_init(
+	struct cam_hw_intf            *hw_intf,
+	struct cam_hw_soc_info        *soc_info,
+	void                          *rdi_hw_info,
+	struct cam_isp_resource_node  *rdi_node);
+
+int cam_vfe_rdi_ver2_deinit(
+	struct cam_isp_resource_node  *rdi_node);
+
+#endif /* _CAM_VFE_RDI_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
new file mode 100644
index 0000000..585ea2b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_debug_util.h"
+
+int cam_vfe_top_init(uint32_t          top_version,
+	struct cam_hw_soc_info        *soc_info,
+	struct cam_hw_intf            *hw_intf,
+	void                          *top_hw_info,
+	struct cam_vfe_top            **vfe_top)
+{
+	int rc = -EINVAL;
+
+	switch (top_version) {
+	case CAM_VFE_TOP_VER_2_0:
+		rc = cam_vfe_top_ver2_init(soc_info, hw_intf, top_hw_info,
+			vfe_top);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
+		break;
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_deinit(uint32_t        top_version,
+	struct cam_vfe_top           **vfe_top)
+{
+	int rc = -EINVAL;
+
+	switch (top_version) {
+	case CAM_VFE_TOP_VER_2_0:
+		rc = cam_vfe_top_ver2_deinit(vfe_top);
+		break;
+	default:
+		CAM_ERR(CAM_ISP, "Error! Unsupported Version %x", top_version);
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
new file mode 100644
index 0000000..31d9a6c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.c
@@ -0,0 +1,831 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/slab.h>
+#include "cam_io_util.h"
+#include "cam_cdm_util.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_vfe_top.h"
+#include "cam_vfe_top_ver2.h"
+#include "cam_debug_util.h"
+#include "cam_cpas_api.h"
+#include "cam_vfe_soc.h"
+
+#define CAM_VFE_HW_RESET_HW_AND_REG_VAL       0x00003F9F
+#define CAM_VFE_HW_RESET_HW_VAL               0x00003F87
+#define CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES 3
+
+struct cam_vfe_top_ver2_common_data {
+	struct cam_hw_soc_info                     *soc_info;
+	struct cam_hw_intf                         *hw_intf;
+	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
+};
+
+struct cam_vfe_top_ver2_priv {
+	struct cam_vfe_top_ver2_common_data common_data;
+	struct cam_isp_resource_node        mux_rsrc[CAM_VFE_TOP_VER2_MUX_MAX];
+	unsigned long                       hw_clk_rate;
+	struct cam_axi_vote                 applied_axi_vote;
+	struct cam_axi_vote             req_axi_vote[CAM_VFE_TOP_VER2_MUX_MAX];
+	unsigned long                   req_clk_rate[CAM_VFE_TOP_VER2_MUX_MAX];
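+	/*
+	 * History of recent BW votes; BW reduction takes effect only after
+	 * CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES worth of lower requests.
+	 */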
+	struct cam_axi_vote             last_vote[CAM_VFE_TOP_VER2_MUX_MAX *
+					CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES];
+	uint32_t                        last_counter;
+	enum cam_vfe_bw_control_action
+		axi_vote_control[CAM_VFE_TOP_VER2_MUX_MAX];
+};
+
+static int cam_vfe_top_mux_get_base(struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	uint32_t                          size = 0;
+	uint32_t                          mem_base = 0;
+	struct cam_isp_hw_get_cmd_update *cdm_args  = cmd_args;
+	struct cam_cdm_utils_ops         *cdm_util_ops = NULL;
+
+	if (arg_size != sizeof(struct cam_isp_hw_get_cmd_update)) {
+		CAM_ERR(CAM_ISP, "Error! Invalid cmd size");
+		return -EINVAL;
+	}
+
+	if (!cdm_args || !cdm_args->res || !top_priv ||
+		!top_priv->common_data.soc_info) {
+		CAM_ERR(CAM_ISP, "Error! Invalid args");
+		return -EINVAL;
+	}
+
+	cdm_util_ops =
+		(struct cam_cdm_utils_ops *)cdm_args->res->cdm_ops;
+
+	if (!cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Invalid CDM ops");
+		return -EINVAL;
+	}
+
+	size = cdm_util_ops->cdm_required_size_changebase();
+	/* since cdm returns dwords, we need to convert it into bytes */
+	if ((size * 4) > cdm_args->cmd.size) {
+		CAM_ERR(CAM_ISP, "buf size:%d is not sufficient, expected: %d",
+			cdm_args->cmd.size, size * 4);
+		return -EINVAL;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(
+		top_priv->common_data.soc_info, VFE_CORE_BASE_IDX);
+	CAM_DBG(CAM_ISP, "core %d mem_base 0x%x",
+		top_priv->common_data.soc_info->index, mem_base);
+
+	cdm_util_ops->cdm_write_changebase(
+		cdm_args->cmd.cmd_buf_addr, mem_base);
+	cdm_args->cmd.used_bytes = (size * 4);
+
+	return 0;
+}
+
+static int cam_vfe_top_set_hw_clk_rate(
+	struct cam_vfe_top_ver2_priv *top_priv)
+{
+	struct cam_hw_soc_info        *soc_info = NULL;
+	int                            i, rc = 0;
+	unsigned long                  max_clk_rate = 0;
+
+	soc_info = top_priv->common_data.soc_info;
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->req_clk_rate[i] > max_clk_rate)
+			max_clk_rate = top_priv->req_clk_rate[i];
+	}
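+
+	/* Nothing to do if the aggregated rate is already applied */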
+	if (max_clk_rate == top_priv->hw_clk_rate)
+		return 0;
+
+	CAM_DBG(CAM_ISP, "VFE: Clock name=%s idx=%d clk=%llu",
+		soc_info->clk_name[soc_info->src_clk_idx],
+		soc_info->src_clk_idx, max_clk_rate);
+
+	rc = cam_soc_util_set_src_clk_rate(soc_info, max_clk_rate);
+
+	if (!rc)
+		top_priv->hw_clk_rate = max_clk_rate;
+	else
+		CAM_ERR(CAM_ISP, "Set Clock rate failed, rc=%d", rc);
+
+	return rc;
+}
+
+static int cam_vfe_top_set_axi_bw_vote(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	bool start_stop)
+{
+	struct cam_axi_vote sum = {0, 0};
+	struct cam_axi_vote to_be_applied_axi_vote = {0, 0};
+	int i, rc = 0;
+	struct cam_hw_soc_info   *soc_info =
+		top_priv->common_data.soc_info;
+	struct cam_vfe_soc_private *soc_private =
+		soc_info->soc_private;
+	bool apply_bw_update = false;
+
+	if (!soc_private) {
+		CAM_ERR(CAM_ISP, "Error soc_private NULL");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->axi_vote_control[i] ==
+			CAM_VFE_BW_CONTROL_INCLUDE) {
+			sum.uncompressed_bw +=
+				top_priv->req_axi_vote[i].uncompressed_bw;
+			sum.compressed_bw +=
+				top_priv->req_axi_vote[i].compressed_bw;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Updating BW from (%llu %llu) to (%llu %llu)",
+		top_priv->applied_axi_vote.uncompressed_bw,
+		top_priv->applied_axi_vote.compressed_bw,
+		sum.uncompressed_bw,
+		sum.compressed_bw);
+
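+	/* Record the new request in the vote history ring */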
+	top_priv->last_vote[top_priv->last_counter] = sum;
+	top_priv->last_counter = (top_priv->last_counter + 1) %
+		(CAM_VFE_TOP_VER2_MUX_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+
+	if ((top_priv->applied_axi_vote.uncompressed_bw ==
+		sum.uncompressed_bw) &&
+		(top_priv->applied_axi_vote.compressed_bw ==
+		sum.compressed_bw)) {
+		CAM_DBG(CAM_ISP, "BW config unchanged %llu %llu",
+			top_priv->applied_axi_vote.uncompressed_bw,
+			top_priv->applied_axi_vote.compressed_bw);
+		return 0;
+	}
+
+	if (start_stop) {
+		/* need to vote current request immediately */
+		to_be_applied_axi_vote = sum;
+		/* Reset everything, we can start afresh */
+		memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+			(CAM_VFE_TOP_VER2_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+		top_priv->last_counter = 0;
+		top_priv->last_vote[top_priv->last_counter] = sum;
+		top_priv->last_counter = (top_priv->last_counter + 1) %
+			(CAM_VFE_TOP_VER2_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES);
+	} else {
+		/*
+		 * Find the max bw requested in the last few frames. This is
+		 * the bw that we want to vote to CPAS now.
+		 */
+		for (i = 0; i < (CAM_VFE_TOP_VER2_MUX_MAX *
+			CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES); i++) {
+			if (to_be_applied_axi_vote.compressed_bw <
+				top_priv->last_vote[i].compressed_bw)
+				to_be_applied_axi_vote.compressed_bw =
+					top_priv->last_vote[i].compressed_bw;
+
+			if (to_be_applied_axi_vote.uncompressed_bw <
+				top_priv->last_vote[i].uncompressed_bw)
+				to_be_applied_axi_vote.uncompressed_bw =
+					top_priv->last_vote[i].uncompressed_bw;
+		}
+	}
+
+	if ((to_be_applied_axi_vote.uncompressed_bw !=
+		top_priv->applied_axi_vote.uncompressed_bw) ||
+		(to_be_applied_axi_vote.compressed_bw !=
+		top_priv->applied_axi_vote.compressed_bw))
+		apply_bw_update = true;
+
+	CAM_DBG(CAM_ISP, "apply_bw_update=%d", apply_bw_update);
+
+	if (apply_bw_update) {
+		rc = cam_cpas_update_axi_vote(
+			soc_private->cpas_handle,
+			&to_be_applied_axi_vote);
+		if (!rc) {
+			top_priv->applied_axi_vote.uncompressed_bw =
+				to_be_applied_axi_vote.uncompressed_bw;
+			top_priv->applied_axi_vote.compressed_bw =
+				to_be_applied_axi_vote.compressed_bw;
+		} else {
+			CAM_ERR(CAM_ISP, "BW request failed, rc=%d", rc);
+		}
+	}
+
+	return rc;
+}
+
+static int cam_vfe_top_clock_update(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_clock_update_args     *clk_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   i, rc = 0;
+
+	clk_update =
+		(struct cam_vfe_clock_update_args *)cmd_args;
+	res = clk_update->node_res;
+
+	if (!res || !res->hw_intf->hw_priv) {
+		CAM_ERR(CAM_ISP, "Invalid input res %pK", res);
+		return -EINVAL;
+	}
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+			top_priv->req_clk_rate[i] = clk_update->clk_rate;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_DBG(CAM_ISP,
+			"VFE:%d Not ready to set clocks yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else
+		rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+
+	return rc;
+}
+
+static int cam_vfe_top_bw_update(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_bw_update_args        *bw_update = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_update = (struct cam_vfe_bw_update_args *)cmd_args;
+	res = bw_update->node_res;
+
+	if (!res || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+			top_priv->req_axi_vote[i].uncompressed_bw =
+				bw_update->camnoc_bw_bytes;
+			top_priv->req_axi_vote[i].compressed_bw =
+				bw_update->external_bw_bytes;
+			top_priv->axi_vote_control[i] =
+				CAM_VFE_BW_CONTROL_INCLUDE;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"VFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else
+		rc = cam_vfe_top_set_axi_bw_vote(top_priv, false);
+
+	return rc;
+}
+
+static int cam_vfe_top_bw_control(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_vfe_bw_control_args       *bw_ctrl = NULL;
+	struct cam_isp_resource_node         *res = NULL;
+	struct cam_hw_info                   *hw_info = NULL;
+	int                                   rc = 0;
+	int                                   i;
+
+	bw_ctrl = (struct cam_vfe_bw_control_args *)cmd_args;
+	res = bw_ctrl->node_res;
+
+	if (!res || !res->hw_intf->hw_priv)
+		return -EINVAL;
+
+	hw_info = res->hw_intf->hw_priv;
+
+	if (res->res_type != CAM_ISP_RESOURCE_VFE_IN ||
+		res->res_id >= CAM_ISP_HW_VFE_IN_MAX) {
+		CAM_ERR(CAM_ISP, "VFE:%d Invalid res_type:%d res id%d",
+			res->hw_intf->hw_idx, res->res_type,
+			res->res_id);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id == res->res_id) {
+			top_priv->axi_vote_control[i] = bw_ctrl->action;
+			break;
+		}
+	}
+
+	if (hw_info->hw_state != CAM_HW_STATE_POWER_UP) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"VFE:%d Not ready to set BW yet :%d",
+			res->hw_intf->hw_idx,
+			hw_info->hw_state);
+	} else {
+		rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
+	}
+
+	return rc;
+}
+
+static int cam_vfe_top_mux_get_reg_update(
+	struct cam_vfe_top_ver2_priv *top_priv,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_isp_hw_get_cmd_update  *cmd_update = cmd_args;
+
+	if (cmd_update->res->process_cmd)
+		return cmd_update->res->process_cmd(cmd_update->res,
+			CAM_ISP_HW_CMD_GET_REG_UPDATE, cmd_args, arg_size);
+
+	return -EINVAL;
+}
+
+int cam_vfe_top_get_hw_caps(void *device_priv,
+	void *get_hw_cap_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_top_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_top_reset(void *device_priv,
+	void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv   *top_priv = device_priv;
+	struct cam_hw_soc_info         *soc_info = NULL;
+	struct cam_vfe_top_ver2_reg_offset_common *reg_common = NULL;
+	uint32_t *reset_reg_args = reset_core_args;
+	uint32_t reset_reg_val;
+
+	if (!top_priv || !reset_reg_args) {
+		CAM_ERR(CAM_ISP, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (*reset_reg_args) {
+	case CAM_VFE_HW_RESET_HW_AND_REG:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_AND_REG_VAL;
+		break;
+	default:
+		reset_reg_val = CAM_VFE_HW_RESET_HW_VAL;
+		break;
+	}
+
+	CAM_DBG(CAM_ISP, "reset reg value: %x", reset_reg_val);
+	soc_info = top_priv->common_data.soc_info;
+	reg_common = top_priv->common_data.common_reg;
+
+	/* Mask all the IRQs except RESET */
+	cam_io_w_mb((1 << 31),
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) + 0x5C);
+
+	/* Reset HW */
+	cam_io_w_mb(reset_reg_val,
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX) +
+		reg_common->global_reset_cmd);
+
+	CAM_DBG(CAM_ISP, "Reset HW exit");
+	return 0;
+}
+
+int cam_vfe_top_reserve(void *device_priv,
+	void *reserve_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_vfe_acquire_args             *args;
+	struct cam_vfe_hw_vfe_in_acquire_args   *acquire_args;
+	uint32_t i;
+	int rc = -EINVAL;
+
+	if (!device_priv || !reserve_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
+	args = (struct cam_vfe_acquire_args *)reserve_args;
+	acquire_args = &args->vfe_in;
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		if (top_priv->mux_rsrc[i].res_id ==  acquire_args->res_id &&
+			top_priv->mux_rsrc[i].res_state ==
+			CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+
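+			/* CAMIF and CAMIF lite need acquire-time config */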
+			if (acquire_args->res_id == CAM_ISP_HW_VFE_IN_CAMIF) {
+				rc = cam_vfe_camif_ver2_acquire_resource(
+					&top_priv->mux_rsrc[i],
+					args);
+				if (rc)
+					break;
+			}
+
+			if (acquire_args->res_id ==
+				CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+				rc = cam_vfe_camif_lite_ver2_acquire_resource(
+					&top_priv->mux_rsrc[i],
+					args);
+				if (rc)
+					break;
+			}
+
+			top_priv->mux_rsrc[i].cdm_ops = acquire_args->cdm_ops;
+			top_priv->mux_rsrc[i].tasklet_info = args->tasklet;
+			top_priv->mux_rsrc[i].res_state =
+				CAM_ISP_RESOURCE_STATE_RESERVED;
+			acquire_args->rsrc_node =
+				&top_priv->mux_rsrc[i];
+
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_release(void *device_priv,
+	void *release_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+
+	if (!device_priv || !release_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)release_args;
+
+	CAM_DBG(CAM_ISP, "Resource in state %d", mux_res->res_state);
+	if (mux_res->res_state < CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Error! Resource in Invalid res_state :%d",
+			mux_res->res_state);
+		return -EINVAL;
+	}
+	mux_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+int cam_vfe_top_start(void *device_priv,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+	struct cam_hw_info                      *hw_info = NULL;
+	int rc = 0;
+
+	if (!device_priv || !start_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)start_args;
+	hw_info = (struct cam_hw_info  *)mux_res->hw_intf->hw_priv;
+
+	if (hw_info->hw_state == CAM_HW_STATE_POWER_UP) {
+		rc = cam_vfe_top_set_hw_clk_rate(top_priv);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"set_hw_clk_rate failed, rc=%d", rc);
+			return rc;
+		}
+
+		rc = cam_vfe_top_set_axi_bw_vote(top_priv, true);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"set_axi_bw_vote failed, rc=%d", rc);
+			return rc;
+		}
+
+		if (mux_res->start) {
+			rc = mux_res->start(mux_res);
+		} else {
+			CAM_ERR(CAM_ISP,
+				"Invalid res id:%d", mux_res->res_id);
+			rc = -EINVAL;
+		}
+	} else {
+		CAM_ERR(CAM_ISP, "VFE HW not powered up");
+		rc = -EPERM;
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_stop(void *device_priv,
+	void *stop_args, uint32_t arg_size)
+{
+	struct cam_vfe_top_ver2_priv            *top_priv;
+	struct cam_isp_resource_node            *mux_res;
+	struct cam_hw_info                      *hw_info = NULL;
+	int i, rc = 0;
+
+	if (!device_priv || !stop_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input arguments");
+		return -EINVAL;
+	}
+
+	top_priv = (struct cam_vfe_top_ver2_priv   *)device_priv;
+	mux_res = (struct cam_isp_resource_node *)stop_args;
+	hw_info = (struct cam_hw_info  *)mux_res->hw_intf->hw_priv;
+
+	if ((mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF) ||
+		(mux_res->res_id == CAM_ISP_HW_VFE_IN_CAMIF_LITE) ||
+		((mux_res->res_id >= CAM_ISP_HW_VFE_IN_RDI0) &&
+		(mux_res->res_id <= CAM_ISP_HW_VFE_IN_RDI3))) {
+		rc = mux_res->stop(mux_res);
+	} else {
+		CAM_ERR(CAM_ISP, "Invalid res id:%d", mux_res->res_id);
+		return -EINVAL;
+	}
+
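+	/* On a successful stop, drop this mux's clock and BW requests */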
+	if (!rc) {
+		for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+			if (top_priv->mux_rsrc[i].res_id == mux_res->res_id) {
+				top_priv->req_clk_rate[i] = 0;
+				top_priv->req_axi_vote[i].compressed_bw = 0;
+				top_priv->req_axi_vote[i].uncompressed_bw = 0;
+				top_priv->axi_vote_control[i] =
+					CAM_VFE_BW_CONTROL_EXCLUDE;
+				break;
+			}
+		}
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_read(void *device_priv,
+	void *read_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_top_write(void *device_priv,
+	void *write_args, uint32_t arg_size)
+{
+	return -EPERM;
+}
+
+int cam_vfe_top_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	int rc = 0;
+	struct cam_vfe_top_ver2_priv            *top_priv;
+
+	if (!device_priv || !cmd_args) {
+		CAM_ERR(CAM_ISP, "Error! Invalid arguments");
+		return -EINVAL;
+	}
+	top_priv = (struct cam_vfe_top_ver2_priv *)device_priv;
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_CHANGE_BASE:
+		rc = cam_vfe_top_mux_get_base(top_priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_REG_UPDATE:
+		rc = cam_vfe_top_mux_get_reg_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_CLOCK_UPDATE:
+		rc = cam_vfe_top_clock_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_UPDATE:
+		rc = cam_vfe_top_bw_update(top_priv, cmd_args,
+			arg_size);
+		break;
+	case CAM_ISP_HW_CMD_BW_CONTROL:
+		rc = cam_vfe_top_bw_control(top_priv, cmd_args, arg_size);
+		break;
+	default:
+		rc = -EINVAL;
+		CAM_ERR(CAM_ISP, "Error! Invalid cmd:%d", cmd_type);
+		break;
+	}
+
+	return rc;
+}
+
+int cam_vfe_top_ver2_init(
+	struct cam_hw_soc_info                 *soc_info,
+	struct cam_hw_intf                     *hw_intf,
+	void                                   *top_hw_info,
+	struct cam_vfe_top                    **vfe_top_ptr)
+{
+	int i, j, rc = 0;
+	struct cam_vfe_top_ver2_priv           *top_priv = NULL;
+	struct cam_vfe_top_ver2_hw_info        *ver2_hw_info = top_hw_info;
+	struct cam_vfe_top                     *vfe_top;
+
+	vfe_top = kzalloc(sizeof(struct cam_vfe_top), GFP_KERNEL);
+	if (!vfe_top) {
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	top_priv = kzalloc(sizeof(struct cam_vfe_top_ver2_priv),
+		GFP_KERNEL);
+	if (!top_priv) {
+		CAM_DBG(CAM_ISP, "Error! Failed to alloc for vfe_top_priv");
+		rc = -ENOMEM;
+		goto free_vfe_top;
+	}
+	vfe_top->top_priv = top_priv;
+	top_priv->hw_clk_rate = 0;
+	top_priv->applied_axi_vote.compressed_bw = 0;
+	top_priv->applied_axi_vote.uncompressed_bw = 0;
+	memset(top_priv->last_vote, 0x0, sizeof(struct cam_axi_vote) *
+		(CAM_VFE_TOP_VER2_MUX_MAX *
+		CAM_VFE_DELAY_BW_REDUCTION_NUM_FRAMES));
+	top_priv->last_counter = 0;
+
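+	/* Initialize each mux resource according to its configured type */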
+	for (i = 0, j = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		top_priv->mux_rsrc[i].res_type = CAM_ISP_RESOURCE_VFE_IN;
+		top_priv->mux_rsrc[i].hw_intf = hw_intf;
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_AVAILABLE;
+		top_priv->req_clk_rate[i] = 0;
+		top_priv->req_axi_vote[i].compressed_bw = 0;
+		top_priv->req_axi_vote[i].uncompressed_bw = 0;
+		top_priv->axi_vote_control[i] = CAM_VFE_BW_CONTROL_EXCLUDE;
+
+		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_CAMIF;
+
+			rc = cam_vfe_camif_ver2_init(hw_intf, soc_info,
+				&ver2_hw_info->camif_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+		} else if (ver2_hw_info->mux_type[i] ==
+			CAM_VFE_CAMIF_LITE_VER_2_0) {
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_CAMIF_LITE;
+
+			rc = cam_vfe_camif_lite_ver2_init(hw_intf, soc_info,
+				&ver2_hw_info->camif_lite_hw_info,
+				&top_priv->mux_rsrc[i]);
+
+			if (rc)
+				goto deinit_resources;
+		} else if (ver2_hw_info->mux_type[i] ==
+			CAM_VFE_RDI_VER_1_0) {
+			/* set the RDI resource id */
+			top_priv->mux_rsrc[i].res_id =
+				CAM_ISP_HW_VFE_IN_RDI0 + j++;
+
+			rc = cam_vfe_rdi_ver2_init(hw_intf, soc_info,
+				&ver2_hw_info->rdi_hw_info,
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				goto deinit_resources;
+		} else {
+			CAM_WARN(CAM_ISP, "Invalid mux type: %u",
+				ver2_hw_info->mux_type[i]);
+		}
+	}
+
+	vfe_top->hw_ops.get_hw_caps = cam_vfe_top_get_hw_caps;
+	vfe_top->hw_ops.init        = cam_vfe_top_init_hw;
+	vfe_top->hw_ops.reset       = cam_vfe_top_reset;
+	vfe_top->hw_ops.reserve     = cam_vfe_top_reserve;
+	vfe_top->hw_ops.release     = cam_vfe_top_release;
+	vfe_top->hw_ops.start       = cam_vfe_top_start;
+	vfe_top->hw_ops.stop        = cam_vfe_top_stop;
+	vfe_top->hw_ops.read        = cam_vfe_top_read;
+	vfe_top->hw_ops.write       = cam_vfe_top_write;
+	vfe_top->hw_ops.process_cmd = cam_vfe_top_process_cmd;
+	*vfe_top_ptr = vfe_top;
+
+	top_priv->common_data.soc_info     = soc_info;
+	top_priv->common_data.hw_intf      = hw_intf;
+	top_priv->common_data.common_reg   = ver2_hw_info->common_reg;
+
+	return rc;
+
+deinit_resources:
+	for (--i; i >= 0; i--) {
+		if (ver2_hw_info->mux_type[i] == CAM_VFE_CAMIF_VER_2_0) {
+			if (cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "Camif Deinit failed");
+		} else if (ver2_hw_info->mux_type[i] ==
+			CAM_VFE_CAMIF_LITE_VER_2_0) {
+			if (cam_vfe_camif_lite_ver2_deinit(
+				&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "Camif lite deinit failed");
+		} else {
+			if (cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]))
+				CAM_ERR(CAM_ISP, "RDI Deinit failed");
+		}
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	}
+
+	kfree(vfe_top->top_priv);
+free_vfe_top:
+	kfree(vfe_top);
+end:
+	return rc;
+}
+
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top  **vfe_top_ptr)
+{
+	int i, rc = 0;
+	struct cam_vfe_top_ver2_priv           *top_priv = NULL;
+	struct cam_vfe_top                     *vfe_top;
+
+	if (!vfe_top_ptr) {
+		CAM_ERR(CAM_ISP, "Error! Invalid input");
+		return -EINVAL;
+	}
+
+	vfe_top = *vfe_top_ptr;
+	if (!vfe_top) {
+		CAM_ERR(CAM_ISP, "Error! vfe_top NULL");
+		return -ENODEV;
+	}
+
+	top_priv = vfe_top->top_priv;
+	if (!top_priv) {
+		CAM_ERR(CAM_ISP, "Error! vfe_top_priv NULL");
+		rc = -ENODEV;
+		goto free_vfe_top;
+	}
+
+	for (i = 0; i < CAM_VFE_TOP_VER2_MUX_MAX; i++) {
+		top_priv->mux_rsrc[i].res_state =
+			CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+		/*
+		 * res_type is CAM_ISP_RESOURCE_VFE_IN for every mux entry,
+		 * so the per-mux deinit has to be selected by res_id.
+		 */
+		if (top_priv->mux_rsrc[i].res_id ==
+			CAM_ISP_HW_VFE_IN_CAMIF) {
+			rc = cam_vfe_camif_ver2_deinit(&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP, "Camif deinit failed rc=%d",
+					rc);
+		} else if (top_priv->mux_rsrc[i].res_id ==
+			CAM_ISP_HW_VFE_IN_CAMIF_LITE) {
+			rc = cam_vfe_camif_lite_ver2_deinit(
+				&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"Camif lite deinit failed rc=%d", rc);
+		} else if ((top_priv->mux_rsrc[i].res_id >=
+			CAM_ISP_HW_VFE_IN_RDI0) &&
+			(top_priv->mux_rsrc[i].res_id <=
+			CAM_ISP_HW_VFE_IN_RDI3)) {
+			rc = cam_vfe_rdi_ver2_deinit(&top_priv->mux_rsrc[i]);
+			if (rc)
+				CAM_ERR(CAM_ISP, "RDI deinit failed rc=%d", rc);
+		}
+	}
+
+	kfree(vfe_top->top_priv);
+
+free_vfe_top:
+	kfree(vfe_top);
+	*vfe_top_ptr = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
new file mode 100644
index 0000000..d362036
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver2.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_TOP_VER2_H_
+#define _CAM_VFE_TOP_VER2_H_
+
+#include "cam_vfe_camif_ver2.h"
+#include "cam_vfe_camif_lite_ver2.h"
+#include "cam_vfe_rdi.h"
+
+#define CAM_VFE_TOP_VER2_MUX_MAX     5
+
+enum cam_vfe_top_ver2_module_type {
+	CAM_VFE_TOP_VER2_MODULE_LENS,
+	CAM_VFE_TOP_VER2_MODULE_STATS,
+	CAM_VFE_TOP_VER2_MODULE_COLOR,
+	CAM_VFE_TOP_VER2_MODULE_ZOOM,
+	CAM_VFE_TOP_VER2_MODULE_MAX,
+};
+
+struct cam_vfe_top_ver2_reg_offset_module_ctrl {
+	uint32_t reset;
+	uint32_t cgc_ovd;
+	uint32_t enable;
+};
+
+struct cam_vfe_top_ver2_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t lens_feature;
+	uint32_t stats_feature;
+	uint32_t color_feature;
+	uint32_t zoom_feature;
+	uint32_t global_reset_cmd;
+	struct cam_vfe_top_ver2_reg_offset_module_ctrl
+		*module_ctrl[CAM_VFE_TOP_VER2_MODULE_MAX];
+	uint32_t bus_cgc_ovd;
+	uint32_t core_cfg;
+	uint32_t three_D_cfg;
+	uint32_t violation_status;
+	uint32_t reg_update_cmd;
+};
+
+struct cam_vfe_top_ver2_hw_info {
+	struct cam_vfe_top_ver2_reg_offset_common  *common_reg;
+	struct cam_vfe_camif_ver2_hw_info           camif_hw_info;
+	struct cam_vfe_camif_lite_ver2_hw_info      camif_lite_hw_info;
+	struct cam_vfe_rdi_ver2_hw_info             rdi_hw_info;
+	uint32_t mux_type[CAM_VFE_TOP_VER2_MUX_MAX];
+};
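+
+/*
+ * Illustrative sketch only (not part of this change): a per-target file
+ * would typically provide an instance of the hw_info above and hand it to
+ * cam_vfe_top_ver2_init(); the register struct name and mux layout below
+ * are hypothetical, and the per-mux hw_info fields are elided.
+ *
+ *   static struct cam_vfe_top_ver2_hw_info vfe_top_hw_info = {
+ *       .common_reg = &vfe_top_common_reg,
+ *       .mux_type = {
+ *           CAM_VFE_CAMIF_VER_2_0,
+ *           CAM_VFE_RDI_VER_1_0,
+ *           CAM_VFE_RDI_VER_1_0,
+ *           CAM_VFE_RDI_VER_1_0,
+ *           CAM_VFE_CAMIF_LITE_VER_2_0,
+ *       },
+ *   };
+ */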
+
+int cam_vfe_top_ver2_init(struct cam_hw_soc_info     *soc_info,
+	struct cam_hw_intf                           *hw_intf,
+	void                                         *top_hw_info,
+	struct cam_vfe_top                          **vfe_top);
+
+int cam_vfe_top_ver2_deinit(struct cam_vfe_top      **vfe_top);
+
+#endif /* _CAM_VFE_TOP_VER2_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
new file mode 100644
index 0000000..5cc7fa0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/include/cam_vfe_top.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_VFE_TOP_H_
+#define _CAM_VFE_TOP_H_
+
+#include "cam_hw_intf.h"
+#include "cam_isp_hw.h"
+
+#define CAM_VFE_TOP_VER_1_0 0x100000
+#define CAM_VFE_TOP_VER_2_0 0x200000
+
+#define CAM_VFE_CAMIF_VER_1_0 0x10
+#define CAM_VFE_CAMIF_VER_2_0 0x20
+
+#define CAM_VFE_CAMIF_LITE_VER_2_0 0x02
+
+#define CAM_VFE_RDI_VER_1_0 0x1000
+
+struct cam_vfe_top {
+	void                   *top_priv;
+	struct cam_hw_ops       hw_ops;
+};
+
+int cam_vfe_top_init(uint32_t          top_version,
+	struct cam_hw_soc_info        *soc_info,
+	struct cam_hw_intf            *hw_intf,
+	void                          *top_hw_info,
+	struct cam_vfe_top            **vfe_top);
+
+int cam_vfe_top_deinit(uint32_t        top_version,
+	struct cam_vfe_top           **vfe_top);
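+
+/*
+ * Usage sketch, assuming a ver2 target (caller-side names are
+ * illustrative): the wrapper dispatches on top_version.
+ *
+ *   rc = cam_vfe_top_init(CAM_VFE_TOP_VER_2_0, soc_info, hw_intf,
+ *       top_hw_info, &vfe_top);
+ *   ...
+ *   rc = cam_vfe_top_deinit(CAM_VFE_TOP_VER_2_0, &vfe_top);
+ */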
+
+#endif /* _CAM_VFE_TOP_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
new file mode 100644
index 0000000..25e8680
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_dev.o cam_jpeg_context.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
new file mode 100644
index 0000000..72252b1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_sync_api.h"
+#include "cam_jpeg_context.h"
+#include "cam_context_utils.h"
+#include "cam_debug_util.h"
+#include "cam_packet_util.h"
+
+static const char jpeg_dev_name[] = "jpeg";
+
+static int cam_jpeg_context_dump_active_request(void *data, unsigned long iova,
+	uint32_t buf_info)
+{
+	struct cam_context *ctx = (struct cam_context *)data;
+	struct cam_ctx_request          *req = NULL;
+	struct cam_ctx_request          *req_temp = NULL;
+	struct cam_hw_mgr_dump_pf_data  *pf_dbg_entry = NULL;
+	int rc = 0;
+	bool b_mem_found = false;
+
+	if (!ctx) {
+		CAM_ERR(CAM_JPEG, "Invalid ctx");
+		return -EINVAL;
+	}
+
+	CAM_INFO(CAM_JPEG, "iommu fault for jpeg ctx %d state %d",
+		ctx->ctx_id, ctx->state);
+
+	list_for_each_entry_safe(req, req_temp,
+			&ctx->active_req_list, list) {
+		pf_dbg_entry = &(req->pf_data);
+		CAM_INFO(CAM_JPEG, "req_id : %lld", req->request_id);
+
+		rc = cam_context_dump_pf_info_to_hw(ctx, pf_dbg_entry->packet,
+			iova, buf_info, &b_mem_found);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to dump pf info");
+
+		if (b_mem_found)
+			CAM_ERR(CAM_JPEG, "Found page fault in req %lld %d",
+				req->request_id, rc);
+	}
+	return rc;
+}
+
+static int __cam_jpeg_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to Acquire device %d", rc);
+	else
+		ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_jpeg_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unable to release device %d", rc);
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_jpeg_ctx_flush_dev_in_acquired(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to flush device");
+
+	return rc;
+}
+
+static int __cam_jpeg_ctx_config_dev_in_acquired(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	return cam_context_prepare_dev_to_hw(ctx, cmd);
+}
+
+static int __cam_jpeg_ctx_handle_buf_done_in_acquired(void *ctx,
+	uint32_t evt_id, void *done)
+{
+	return cam_context_buf_done_from_hw(ctx, done, evt_id);
+}
+
+static int __cam_jpeg_ctx_stop_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc;
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed in Stop dev, rc=%d", rc);
+
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_jpeg_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = { },
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_jpeg_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = { },
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.release_dev = __cam_jpeg_ctx_release_dev_in_acquired,
+			.config_dev = __cam_jpeg_ctx_config_dev_in_acquired,
+			.stop_dev = __cam_jpeg_ctx_stop_dev_in_acquired,
+			.flush_dev = __cam_jpeg_ctx_flush_dev_in_acquired,
+		},
+		.crm_ops = { },
+		.irq_ops = __cam_jpeg_ctx_handle_buf_done_in_acquired,
+		.pagefault_ops = cam_jpeg_context_dump_active_request,
+	},
+};
+
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id)
+{
+	int rc;
+	int i;
+
+	if (!ctx || !ctx_base) {
+		CAM_ERR(CAM_JPEG, "Invalid Context");
+		rc = -EFAULT;
+		goto err;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	ctx->base = ctx_base;
+
+	for (i = 0; i < CAM_CTX_REQ_MAX; i++)
+		ctx->req_base[i].req_priv = ctx;
+
+	rc = cam_context_init(ctx_base, jpeg_dev_name, CAM_JPEG, ctx_id,
+		NULL, hw_intf, ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Camera Context Base init failed");
+		goto err;
+	}
+
+	ctx_base->state_machine = cam_jpeg_ctx_state_machine;
+	ctx_base->ctx_priv = ctx;
+
+err:
+	return rc;
+}
+
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx)
+{
+	if (!ctx || !ctx->base) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK", ctx);
+		return -EINVAL;
+	}
+
+	cam_context_deinit(ctx->base);
+
+	memset(ctx, 0, sizeof(*ctx));
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
new file mode 100644
index 0000000..23d04ac
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_context.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_JPEG_CONTEXT_H_
+#define _CAM_JPEG_CONTEXT_H_
+
+#include <uapi/media/cam_jpeg.h>
+
+#include "cam_context.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+
+#define CAM_JPEG_HW_EVENT_MAX 20
+
+/**
+ * struct cam_jpeg_context - Jpeg context
+ * @base: Base jpeg cam context object
+ * @req_base: Common request structure
+ */
+struct cam_jpeg_context {
+	struct cam_context *base;
+	struct cam_ctx_request req_base[CAM_CTX_REQ_MAX];
+};
+
+/* cam jpeg context irq handling function type */
+typedef int (*cam_jpeg_hw_event_cb_func)(
+	struct cam_jpeg_context *ctx_jpeg,
+	void *evt_data);
+
+/**
+ * struct cam_jpeg_ctx_irq_ops - Function table for handling IRQ callbacks
+ *
+ * @irq_ops: Array of handle function pointers.
+ *
+ */
+struct cam_jpeg_ctx_irq_ops {
+	cam_jpeg_hw_event_cb_func irq_ops[CAM_JPEG_HW_EVENT_MAX];
+};
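+
+/*
+ * Illustrative sketch only: a handler matching cam_jpeg_hw_event_cb_func
+ * could be wired into the table as below (handler and table names are
+ * hypothetical):
+ *
+ *   static int cam_jpeg_ctx_handle_evt(struct cam_jpeg_context *ctx_jpeg,
+ *       void *evt_data);
+ *
+ *   static struct cam_jpeg_ctx_irq_ops jpeg_ctx_irq_ops = {
+ *       .irq_ops[0] = cam_jpeg_ctx_handle_evt,
+ *   };
+ */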
+
+/**
+ * cam_jpeg_context_init()
+ *
+ * @brief: Initialization function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be initialized
+ * @ctx_base: Context base from cam_context
+ * @hw_intf: JPEG hw manager interface
+ * @ctx_id: ID for this context
+ *
+ */
+int cam_jpeg_context_init(struct cam_jpeg_context *ctx,
+	struct cam_context *ctx_base,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t ctx_id);
+
+/**
+ * cam_jpeg_context_deinit()
+ *
+ * @brief: Deinitialize function for the JPEG context
+ *
+ * @ctx: JPEG context obj to be deinitialized
+ *
+ */
+int cam_jpeg_context_deinit(struct cam_jpeg_context *ctx);
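+
+/*
+ * Typical pairing, as done by cam_jpeg_dev_probe()/cam_jpeg_dev_remove():
+ *
+ *   rc = cam_jpeg_context_init(&g_jpeg_dev.ctx_jpeg[i], &g_jpeg_dev.ctx[i],
+ *       &node->hw_mgr_intf, i);
+ *   ...
+ *   rc = cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]);
+ */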
+
+#endif /* _CAM_JPEG_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
new file mode 100644
index 0000000..b456d4f9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/ion.h>
+#include <linux/kernel.h>
+
+#include "cam_node.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_dev.h"
+#include "cam_debug_util.h"
+#include "cam_smmu_api.h"
+
+#define CAM_JPEG_DEV_NAME "cam-jpeg"
+
+static struct cam_jpeg_dev g_jpeg_dev;
+
+static void cam_jpeg_dev_iommu_fault_handler(
+	struct iommu_domain *domain, struct device *dev, unsigned long iova,
+	int flags, void *token, uint32_t buf_info)
+{
+	int i = 0;
+	struct cam_node *node = NULL;
+
+	if (!token) {
+		CAM_ERR(CAM_JPEG, "invalid token in page handler cb");
+		return;
+	}
+
+	node = (struct cam_node *)token;
+
+	for (i = 0; i < node->ctx_size; i++)
+		cam_context_dump_pf_info(&(node->ctx_list[i]), iova,
+			buf_info);
+}
+
+static const struct of_device_id cam_jpeg_dt_match[] = {
+	{
+		.compatible = "qcom,cam-jpeg"
+	},
+	{ }
+};
+
+static int cam_jpeg_subdev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	mutex_lock(&g_jpeg_dev.jpeg_mutex);
+	g_jpeg_dev.open_cnt++;
+	mutex_unlock(&g_jpeg_dev.jpeg_mutex);
+
+	return 0;
+}
+
+static int cam_jpeg_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&g_jpeg_dev.jpeg_mutex);
+	if (g_jpeg_dev.open_cnt <= 0) {
+		CAM_DBG(CAM_JPEG, "JPEG subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	g_jpeg_dev.open_cnt--;
+
+	if (!node) {
+		CAM_ERR(CAM_JPEG, "Node ptr is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (g_jpeg_dev.open_cnt == 0)
+		cam_node_shutdown(node);
+
+end:
+	mutex_unlock(&g_jpeg_dev.jpeg_mutex);
+	return rc;
+}
+
+static const struct v4l2_subdev_internal_ops cam_jpeg_subdev_internal_ops = {
+	.close = cam_jpeg_subdev_close,
+	.open = cam_jpeg_subdev_open,
+};
+
+static int cam_jpeg_dev_remove(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "JPEG context %d deinit failed %d",
+				i, rc);
+	}
+
+	rc = cam_subdev_remove(&g_jpeg_dev.sd);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Unregister failed %d", rc);
+
+	return rc;
+}
+
+static int cam_jpeg_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+	int iommu_hdl = -1;
+
+	g_jpeg_dev.sd.internal_ops = &cam_jpeg_subdev_internal_ops;
+	rc = cam_subdev_probe(&g_jpeg_dev.sd, pdev, CAM_JPEG_DEV_NAME,
+		CAM_JPEG_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG cam_subdev_probe failed %d", rc);
+		goto err;
+	}
+	node = (struct cam_node *)g_jpeg_dev.sd.token;
+
+	rc = cam_jpeg_hw_mgr_init(pdev->dev.of_node,
+		(uint64_t *)&hw_mgr_intf, &iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Cannot initialize JPEG HW manager %d", rc);
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_jpeg_context_init(&g_jpeg_dev.ctx_jpeg[i],
+			&g_jpeg_dev.ctx[i],
+			&node->hw_mgr_intf,
+			i);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "JPEG context init failed %d %d",
+				i, rc);
+			goto ctx_init_fail;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_jpeg_dev.ctx, CAM_CTX_MAX,
+		CAM_JPEG_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG node init failed %d", rc);
+		goto ctx_init_fail;
+	}
+
+	cam_smmu_set_client_page_fault_handler(iommu_hdl,
+		cam_jpeg_dev_iommu_fault_handler, node);
+
+	mutex_init(&g_jpeg_dev.jpeg_mutex);
+
+	CAM_INFO(CAM_JPEG, "Camera JPEG probe complete");
+
+	return rc;
+
+ctx_init_fail:
+	for (--i; i >= 0; i--)
+		if (cam_jpeg_context_deinit(&g_jpeg_dev.ctx_jpeg[i]))
+			CAM_ERR(CAM_JPEG, "deinit fail %d %d", i, rc);
+unregister:
+	if (cam_subdev_remove(&g_jpeg_dev.sd))
+		CAM_ERR(CAM_JPEG, "remove fail %d", rc);
+err:
+	return rc;
+}
+
+static struct platform_driver jpeg_driver = {
+	.probe = cam_jpeg_dev_probe,
+	.remove = cam_jpeg_dev_remove,
+	.driver = {
+		.name = "cam_jpeg",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_jpeg_dev_init_module(void)
+{
+	return platform_driver_register(&jpeg_driver);
+}
+
+static void __exit cam_jpeg_dev_exit_module(void)
+{
+	platform_driver_unregister(&jpeg_driver);
+}
+
+module_init(cam_jpeg_dev_init_module);
+module_exit(cam_jpeg_dev_exit_module);
+MODULE_DESCRIPTION("MSM JPEG driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
new file mode 100644
index 0000000..a222c4b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/cam_jpeg_dev.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_JPEG_DEV_H_
+#define _CAM_JPEG_DEV_H_
+
+#include "cam_subdev.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_context.h"
+#include "cam_jpeg_context.h"
+
+/**
+ * struct cam_jpeg_dev - Camera JPEG V4l2 device node
+ *
+ * @sd: Common camera subdevice node
+ * @node: Pointer to jpeg subdevice
+ * @ctx: JPEG base context storage
+ * @ctx_jpeg: JPEG private context storage
+ * @jpeg_mutex: Jpeg dev mutex
+ * @open_cnt: Open device count
+ */
+struct cam_jpeg_dev {
+	struct cam_subdev sd;
+	struct cam_node *node;
+	struct cam_context ctx[CAM_CTX_MAX];
+	struct cam_jpeg_context ctx_jpeg[CAM_CTX_MAX];
+	struct mutex jpeg_mutex;
+	int32_t open_cnt;
+};
+#endif /* _CAM_JPEG_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
new file mode 100644
index 0000000..3f823e4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_jpeg_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
new file mode 100644
index 0000000..272aa8f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.c
@@ -0,0 +1,1605 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/debugfs.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_sync_api.h"
+#include "cam_packet_util.h"
+#include "cam_hw.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_jpeg_hw_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_mem_mgr.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+
+#define CAM_JPEG_HW_ENTRIES_MAX  20
+#define CAM_JPEG_CHBASE          0
+#define CAM_JPEG_CFG             1
+#define CAM_JPEG_PARAM           2
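+
+/*
+ * Layout of hw_update_entries for a config request, as assembled by
+ * cam_jpeg_mgr_prepare_hw_update() below: CHBASE indexes the KMD buffer
+ * used for the CDM change-base, CFG is the first UMD command buffer
+ * submitted over CDM, and PARAM indexes the inout-param buffer that the
+ * IRQ bottom half writes the encoded size back into.
+ */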
+
+static struct cam_jpeg_hw_mgr g_jpeg_hw_mgr;
+
+static int32_t cam_jpeg_hw_mgr_cb(uint32_t irq_status,
+	int32_t result_size, void *data);
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data);
+
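+/*
+ * Bottom half for the JPEG frame-done interrupt: it releases the device
+ * (clears the IRQ callback, deinits the core, marks it free), queues
+ * cam_jpeg_mgr_process_cmd() so the next pending config request can be
+ * picked up, writes the encoded size back into the request's inout-param
+ * buffer, and signals buf-done to the owning context.
+ */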
+static int cam_jpeg_mgr_process_irq(void *priv, void *data)
+{
+	int rc = 0;
+	struct cam_jpeg_process_irq_work_data_t *task_data;
+	struct cam_jpeg_hw_mgr *hw_mgr;
+	int32_t i;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_done_event_data buf_data;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	uintptr_t dev_type = 0;
+	uintptr_t kaddr;
+	uint32_t *cmd_buf_kaddr;
+	size_t cmd_buf_len;
+	struct cam_jpeg_config_inout_param_info *p_params;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_frame_work_data_t *wq_task_data;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_JPEG, "Invalid data");
+		return -EINVAL;
+	}
+
+	task_data = data;
+	hw_mgr = &g_jpeg_hw_mgr;
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)task_data->data;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		return -EINVAL;
+	}
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+
+	if (!hw_mgr->device_in_use[dev_type][0] || !p_cfg_req) {
+		CAM_ERR(CAM_JPEG, "irq for old request");
+		mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+	irq_cb.data = NULL;
+	irq_cb.b_set_cb = false;
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+		CAM_ERR(CAM_JPEG, "process_cmd null");
+		mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+		return -EINVAL;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		CAM_JPEG_CMD_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "CMD_SET_IRQ_CB failed %d", rc);
+		mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+		return rc;
+	}
+
+	if (hw_mgr->devices[dev_type][0]->hw_ops.deinit) {
+		rc = hw_mgr->devices[dev_type][0]->hw_ops.deinit(
+			hw_mgr->devices[dev_type][0]->hw_priv, NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to Deinit %lu HW", dev_type);
+	}
+
+	hw_mgr->device_in_use[dev_type][0] = false;
+	hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+	mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+	task = cam_req_mgr_workq_get_task(
+		g_jpeg_hw_mgr.work_process_frame);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		return -EINVAL;
+	}
+
+	wq_task_data = (struct cam_jpeg_process_frame_work_data_t *)
+		task->payload;
+	if (!wq_task_data) {
+		CAM_ERR(CAM_JPEG, "wq_task_data is NULL");
+		return -EINVAL;
+	}
+	wq_task_data->data = (void *)dev_type;
+	wq_task_data->request_id = 0;
+	wq_task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_cmd;
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "could not enqueue task %d", rc);
+		return rc;
+	}
+
+	rc = cam_mem_get_cpu_buf(
+		p_cfg_req->hw_cfg_args.hw_update_entries[CAM_JPEG_PARAM].handle,
+		&kaddr, &cmd_buf_len);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to get info for cmd buf: %x %d",
+			hw_mgr->iommu_hdl, rc);
+		return rc;
+	}
+
+	cmd_buf_kaddr = (uint32_t *)kaddr;
+
+	cmd_buf_kaddr =
+		(cmd_buf_kaddr +
+		(p_cfg_req->hw_cfg_args.hw_update_entries[CAM_JPEG_PARAM].offset
+			/ sizeof(uint32_t)));
+
+	p_params = (struct cam_jpeg_config_inout_param_info *)cmd_buf_kaddr;
+
+	p_params->output_size = task_data->result_size;
+	CAM_DBG(CAM_JPEG, "Encoded Size %d", task_data->result_size);
+
+	buf_data.num_handles =
+		p_cfg_req->hw_cfg_args.num_out_map_entries;
+	for (i = 0; i < buf_data.num_handles; i++) {
+		buf_data.resource_handle[i] =
+		p_cfg_req->hw_cfg_args.out_map_entries[i].resource_handle;
+	}
+	buf_data.request_id =
+		PTR_TO_U64(p_cfg_req->hw_cfg_args.priv);
+	ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+
+	mutex_lock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+	mutex_unlock(&g_jpeg_hw_mgr.hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_jpeg_hw_mgr_cb(
+	uint32_t irq_status, int32_t result_size, void *data)
+{
+	int32_t rc;
+	unsigned long flags;
+	struct cam_jpeg_hw_mgr *hw_mgr = &g_jpeg_hw_mgr;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_irq_work_data_t *task_data;
+
+	spin_lock_irqsave(&hw_mgr->hw_mgr_lock, flags);
+	task = cam_req_mgr_workq_get_task(
+		g_jpeg_hw_mgr.work_process_irq_cb);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+		return -ENOMEM;
+	}
+
+	task_data = (struct cam_jpeg_process_irq_work_data_t *)task->payload;
+	task_data->data = data;
+	task_data->irq_status = irq_status;
+	task_data->result_size = result_size;
+	task_data->type = CAM_JPEG_WORKQ_TASK_MSG_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_irq;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	spin_unlock_irqrestore(&hw_mgr->hw_mgr_lock, flags);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_get_free_ctx(struct cam_jpeg_hw_mgr *hw_mgr)
+{
+	int i = 0;
+	int num_ctx = CAM_JPEG_CTX_MAX;
+
+	for (i = 0; i < num_ctx; i++) {
+		mutex_lock(&hw_mgr->ctx_data[i].ctx_mutex);
+		if (hw_mgr->ctx_data[i].in_use == false) {
+			hw_mgr->ctx_data[i].in_use = true;
+			mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+			break;
+		}
+		mutex_unlock(&hw_mgr->ctx_data[i].ctx_mutex);
+	}
+
+	return i;
+}
+
+static int cam_jpeg_mgr_release_ctx(
+	struct cam_jpeg_hw_mgr *hw_mgr, struct cam_jpeg_hw_ctx_data *ctx_data)
+{
+	if (!ctx_data) {
+		CAM_ERR(CAM_JPEG, "invalid ctx_data %pK", ctx_data);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is already un-used: %pK", ctx_data);
+		mutex_unlock(&ctx_data->ctx_mutex);
+		return -EINVAL;
+	}
+
+	ctx_data->in_use = false;
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	return 0;
+}
+
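+/*
+ * Writes a CDM change-base command into the request's CHBASE command
+ * buffer, so that the command buffers which follow are interpreted
+ * relative to the device's register base, and queues it as the first BL
+ * entry of the request.
+ */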
+static int cam_jpeg_insert_cdm_change_base(
+	struct cam_hw_config_args *config_args,
+	struct cam_jpeg_hw_ctx_data *ctx_data,
+	struct cam_jpeg_hw_mgr *hw_mgr)
+{
+	int rc;
+	uint32_t dev_type;
+	struct cam_cdm_bl_request *cdm_cmd;
+	uint32_t size;
+	uint32_t mem_cam_base;
+	uintptr_t iova_addr;
+	uint32_t *ch_base_iova_addr;
+	size_t ch_base_len;
+
+	rc = cam_mem_get_cpu_buf(
+		config_args->hw_update_entries[CAM_JPEG_CHBASE].handle,
+		&iova_addr, &ch_base_len);
+	if (rc) {
+		CAM_ERR(CAM_JPEG,
+			"unable to get src buf info for cmd buf: %d", rc);
+		return rc;
+	}
+	CAM_DBG(CAM_JPEG, "iova %pK len %zu offset %d",
+		(void *)iova_addr, ch_base_len,
+		config_args->hw_update_entries[CAM_JPEG_CHBASE].offset);
+	ch_base_iova_addr = (uint32_t *)iova_addr;
+	ch_base_iova_addr = (ch_base_iova_addr +
+		(config_args->hw_update_entries[CAM_JPEG_CHBASE].offset /
+		sizeof(uint32_t)));
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+	mem_cam_base = hw_mgr->cdm_reg_map[dev_type][0]->mem_cam_base;
+	size =
+	hw_mgr->cdm_info[dev_type][0].cdm_ops->cdm_required_size_changebase();
+	hw_mgr->cdm_info[dev_type][0].cdm_ops->cdm_write_changebase(
+		ch_base_iova_addr, mem_cam_base);
+
+	cdm_cmd = ctx_data->cdm_cmd;
+	cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle =
+		config_args->hw_update_entries[CAM_JPEG_CHBASE].handle;
+	cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].offset =
+		config_args->hw_update_entries[CAM_JPEG_CHBASE].offset;
+	cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len = size * sizeof(uint32_t);
+	cdm_cmd->cmd_arrary_count++;
+
+	ch_base_iova_addr += size;
+	*ch_base_iova_addr = 0;
+	ch_base_iova_addr += size;
+	*ch_base_iova_addr = 0;
+
+	return rc;
+}
+
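+/*
+ * Worker that programs the hardware for the config request at the head of
+ * hw_config_req_list: marks the device busy, inits it, registers the IRQ
+ * callback, resets the core, submits the change-base plus command buffers
+ * over CDM, and finally starts the device.
+ */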
+static int cam_jpeg_mgr_process_cmd(void *priv, void *data)
+{
+	int rc;
+	int i = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = priv;
+	struct cam_hw_update_entry *cmd;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_hw_config_args *config_args = NULL;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uintptr_t request_id = 0;
+	struct cam_jpeg_process_frame_work_data_t *task_data =
+		(struct cam_jpeg_process_frame_work_data_t *)data;
+	uint32_t dev_type;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	struct cam_hw_done_event_data buf_data;
+	struct cam_hw_config_args *hw_cfg_args = NULL;
+
+	if (!hw_mgr || !task_data) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+			hw_mgr, task_data);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	if (list_empty(&hw_mgr->hw_config_req_list)) {
+		CAM_DBG(CAM_JPEG, "no available request");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	p_cfg_req = list_first_entry(&hw_mgr->hw_config_req_list,
+		struct cam_jpeg_hw_cfg_req, list);
+	if (!p_cfg_req) {
+		CAM_ERR(CAM_JPEG, "no request");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	if (!hw_mgr->device_in_use[p_cfg_req->dev_type][0]) {
+		hw_mgr->device_in_use[p_cfg_req->dev_type][0] = true;
+		hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = p_cfg_req;
+		list_del_init(&p_cfg_req->list);
+	} else {
+		CAM_DBG(CAM_JPEG, "Not dequeuing, just return");
+		rc = -EFAULT;
+		goto end;
+	}
+
+	config_args = (struct cam_hw_config_args *)&p_cfg_req->hw_cfg_args;
+	request_id = task_data->request_id;
+	if (request_id != (uintptr_t)config_args->priv) {
+		CAM_DBG(CAM_JPEG, "not a recent req %zd %zd",
+			request_id, (uintptr_t)config_args->priv);
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_JPEG, "No hw update entries are available");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		rc = -EINVAL;
+		goto end_unusedev;
+	}
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		rc = -EINVAL;
+		goto end_unusedev;
+	}
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	if (dev_type != p_cfg_req->dev_type)
+		CAM_WARN(CAM_JPEG, "dev types do not match, something is wrong");
+
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.init) {
+		CAM_ERR(CAM_JPEG, "hw op init null");
+		rc = -EFAULT;
+		goto end;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.init(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		ctx_data,
+		sizeof(ctx_data));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to Init %d HW", dev_type);
+		goto end;
+	}
+
+	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+	irq_cb.data = (void *)ctx_data;
+	irq_cb.b_set_cb = true;
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+		CAM_ERR(CAM_JPEG, "op process_cmd null");
+		rc = -EFAULT;
+		goto end_callcb;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		CAM_JPEG_CMD_SET_IRQ_CB,
+		&irq_cb, sizeof(irq_cb));
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "SET_IRQ_CB failed %d", rc);
+		goto end_callcb;
+	}
+
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.reset) {
+		CAM_ERR(CAM_JPEG, "op reset null");
+		rc = -EFAULT;
+		goto end_callcb;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.reset(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg hw reset failed %d", rc);
+		goto end_callcb;
+	}
+
+	cdm_cmd = ctx_data->cdm_cmd;
+	cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+	cdm_cmd->flag = false;
+	cdm_cmd->userdata = NULL;
+	cdm_cmd->cookie = 0;
+	cdm_cmd->cmd_arrary_count = 0;
+
+	rc = cam_jpeg_insert_cdm_change_base(config_args,
+		ctx_data, hw_mgr);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "insert change base failed %d", rc);
+		goto end_callcb;
+	}
+
+	CAM_DBG(CAM_JPEG, "num hw up %d", config_args->num_hw_update_entries);
+	for (i = CAM_JPEG_CFG; i < (config_args->num_hw_update_entries - 1);
+		i++) {
+		cmd = (config_args->hw_update_entries + i);
+		cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].bl_addr.mem_handle
+			= cmd->handle;
+		cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].offset =
+			cmd->offset;
+		cdm_cmd->cmd[cdm_cmd->cmd_arrary_count].len =
+			cmd->len;
+		CAM_DBG(CAM_JPEG, "i %d entry h %d o %d l %d",
+			i, cmd->handle, cmd->offset, cmd->len);
+		cdm_cmd->cmd_arrary_count++;
+	}
+
+	rc = cam_cdm_submit_bls(
+		hw_mgr->cdm_info[dev_type][0].cdm_handle, cdm_cmd);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to apply the configs %d", rc);
+		goto end_callcb;
+	}
+
+	if (!hw_mgr->devices[dev_type][0]->hw_ops.start) {
+		CAM_ERR(CAM_JPEG, "op start null");
+		rc = -EINVAL;
+		goto end_callcb;
+	}
+	rc = hw_mgr->devices[dev_type][0]->hw_ops.start(
+		hw_mgr->devices[dev_type][0]->hw_priv,
+		NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Failed to start hw %d",
+			rc);
+		goto end_callcb;
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+
+end_callcb:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	if (p_cfg_req) {
+		hw_cfg_args = &p_cfg_req->hw_cfg_args;
+		buf_data.num_handles =
+			hw_cfg_args->num_out_map_entries;
+		for (i = 0; i < buf_data.num_handles; i++) {
+			buf_data.resource_handle[i] =
+			hw_cfg_args->out_map_entries[i].resource_handle;
+		}
+		buf_data.request_id =
+			(uintptr_t)p_cfg_req->hw_cfg_args.priv;
+		ctx_data->ctxt_event_cb(ctx_data->context_priv, 0, &buf_data);
+	}
+
+end_unusedev:
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_mgr->device_in_use[p_cfg_req->dev_type][0] = false;
+	hw_mgr->dev_hw_cfg_args[p_cfg_req->dev_type][0] = NULL;
+
+end:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
+static int cam_jpeg_mgr_config_hw(void *hw_mgr_priv, void *config_hw_args)
+{
+	int rc;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *config_args = config_hw_args;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uintptr_t request_id = 0;
+	struct cam_hw_update_entry *hw_update_entries;
+	struct crm_workq_task *task;
+	struct cam_jpeg_process_frame_work_data_t *task_data;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+
+	if (!hw_mgr || !config_args) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments %pK %pK",
+			hw_mgr, config_args);
+		return -EINVAL;
+	}
+
+	if (!config_args->num_hw_update_entries) {
+		CAM_ERR(CAM_JPEG, "No hw update entries are available");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)config_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	if (list_empty(&hw_mgr->free_req_list)) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_JPEG, "list empty");
+		return -ENOMEM;
+	}
+
+	p_cfg_req = list_first_entry(&hw_mgr->free_req_list,
+		struct cam_jpeg_hw_cfg_req, list);
+	list_del_init(&p_cfg_req->list);
+
+	/* Update Currently Processing Config Request */
+	p_cfg_req->hw_cfg_args = *config_args;
+	p_cfg_req->dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	request_id = (uintptr_t)config_args->priv;
+	p_cfg_req->req_id = request_id;
+	hw_update_entries = config_args->hw_update_entries;
+	CAM_DBG(CAM_JPEG, "ctx_data = %pK req_id = %zd %zd",
+		ctx_data, request_id, (uintptr_t)config_args->priv);
+	task = cam_req_mgr_workq_get_task(g_jpeg_hw_mgr.work_process_frame);
+	if (!task) {
+		CAM_ERR(CAM_JPEG, "no empty task");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		rc = -ENOMEM;
+		goto err_after_dq_free_list;
+	}
+
+	task_data = (struct cam_jpeg_process_frame_work_data_t *)
+		task->payload;
+	if (!task_data) {
+		CAM_ERR(CAM_JPEG, "task_data is NULL");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		rc = -EINVAL;
+		goto err_after_dq_free_list;
+	}
+	CAM_DBG(CAM_JPEG, "cfge %pK num %d",
+		p_cfg_req->hw_cfg_args.hw_update_entries,
+		p_cfg_req->hw_cfg_args.num_hw_update_entries);
+
+	list_add_tail(&p_cfg_req->list, &hw_mgr->hw_config_req_list);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	task_data->data = (void *)(uintptr_t)p_cfg_req->dev_type;
+	task_data->request_id = request_id;
+	task_data->type = CAM_JPEG_WORKQ_TASK_CMD_TYPE;
+	task->process_cb = cam_jpeg_mgr_process_cmd;
+
+	rc = cam_req_mgr_workq_enqueue_task(task, &g_jpeg_hw_mgr,
+		CRM_TASK_PRIORITY_0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to enqueue task %d", rc);
+		goto err_after_get_task;
+	}
+
+	return rc;
+
+err_after_get_task:
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	list_del_init(&p_cfg_req->list);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+err_after_dq_free_list:
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	list_add_tail(&p_cfg_req->list, &hw_mgr->free_req_list);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static void cam_jpeg_mgr_print_io_bufs(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl, uint32_t pf_buf_info,
+	bool *mem_found)
+{
+	uint64_t   iova_addr;
+	size_t     src_buf_size;
+	int        i;
+	int        j;
+	int        rc = 0;
+	int32_t    mmu_hdl;
+	struct cam_buf_io_cfg  *io_cfg = NULL;
+
+	if (mem_found)
+		*mem_found = false;
+
+	io_cfg = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+
+	for (i = 0; i < packet->num_io_configs; i++) {
+		for (j = 0; j < CAM_PACKET_MAX_PLANES; j++) {
+			if (!io_cfg[i].mem_handle[j])
+				break;
+
+			if (GET_FD_FROM_HANDLE(io_cfg[i].mem_handle[j]) ==
+				GET_FD_FROM_HANDLE(pf_buf_info)) {
+				CAM_INFO(CAM_JPEG,
+					"Found PF at port: %d mem %x fd: %x",
+					io_cfg[i].resource_type,
+					io_cfg[i].mem_handle[j],
+					pf_buf_info);
+				if (mem_found)
+					*mem_found = true;
+			}
+
+			CAM_INFO(CAM_JPEG, "port: %d f: %u format: %d dir %d",
+				io_cfg[i].resource_type,
+				io_cfg[i].fence,
+				io_cfg[i].format,
+				io_cfg[i].direction);
+
+			mmu_hdl = cam_mem_is_secure_buf(
+				io_cfg[i].mem_handle[j]) ? sec_mmu_hdl :
+				iommu_hdl;
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[j],
+				mmu_hdl, &iova_addr, &src_buf_size);
+			if (rc < 0) {
+				CAM_ERR(CAM_JPEG, "get src buf address fail");
+				continue;
+			}
+			if (iova_addr >> 32) {
+				CAM_ERR(CAM_JPEG, "Invalid mapped address");
+				rc = -EINVAL;
+				continue;
+			}
+
+			CAM_INFO(CAM_JPEG,
+				"pln %d w %d h %d size %d addr 0x%x offset 0x%x memh %x",
+				j, io_cfg[i].planes[j].width,
+				io_cfg[i].planes[j].height,
+				(int32_t)src_buf_size,
+				(unsigned int)iova_addr,
+				io_cfg[i].offsets[j],
+				io_cfg[i].mem_handle[j]);
+
+			iova_addr += io_cfg[i].offsets[j];
+		}
+	}
+}
+
+static int cam_jpeg_mgr_prepare_hw_update(void *hw_mgr_priv,
+	void *prepare_hw_update_args)
+{
+	int rc, i, j, k;
+	struct cam_hw_prepare_update_args *prepare_args =
+		prepare_hw_update_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_packet *packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_buf_io_cfg *io_cfg_ptr = NULL;
+	struct cam_kmd_buf_info kmd_buf;
+
+	if (!prepare_args || !hw_mgr) {
+		CAM_ERR(CAM_JPEG, "Invalid args %pK %pK",
+			prepare_args, hw_mgr);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)prepare_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	packet = prepare_args->packet;
+	if (!packet) {
+		CAM_ERR(CAM_JPEG, "received packet is NULL");
+		return -EINVAL;
+	}
+
+	if (((packet->header.op_code & 0xff) != CAM_JPEG_OPCODE_ENC_UPDATE) &&
+		((packet->header.op_code
+		& 0xff) != CAM_JPEG_OPCODE_DMA_UPDATE)) {
+		CAM_ERR(CAM_JPEG, "Invalid Opcode in pkt: %d",
+			packet->header.op_code & 0xff);
+		return -EINVAL;
+	}
+
+	rc = cam_packet_util_validate_packet(packet);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "invalid packet %d", rc);
+		return rc;
+	}
+
+	if ((packet->num_cmd_buf > 5) || !packet->num_patches ||
+		!packet->num_io_configs) {
+		CAM_ERR(CAM_JPEG, "wrong number of cmd/patch info: %u %u",
+			packet->num_cmd_buf,
+			packet->num_patches);
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&packet->payload +
+		(packet->cmd_buf_offset / 4));
+	CAM_DBG(CAM_JPEG, "packet = %pK cmd_desc = %pK size = %zu",
+		(void *)packet, (void *)cmd_desc,
+		sizeof(struct cam_cmd_buf_desc));
+
+	rc = cam_packet_util_process_patches(packet, hw_mgr->iommu_hdl, -1);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "Patch processing failed %d", rc);
+		return rc;
+	}
+
+	io_cfg_ptr = (struct cam_buf_io_cfg *)((uint32_t *)&packet->payload +
+		packet->io_configs_offset / 4);
+	CAM_DBG(CAM_JPEG, "packet = %pK io_cfg_ptr = %pK size = %zu",
+		(void *)packet, (void *)io_cfg_ptr,
+		sizeof(struct cam_buf_io_cfg));
+	prepare_args->pf_data->packet = packet;
+
+	prepare_args->num_in_map_entries = 0;
+	prepare_args->num_out_map_entries = 0;
+
+	for (i = 0, j = 0, k = 0; i < packet->num_io_configs; i++) {
+		if (io_cfg_ptr[i].direction == CAM_BUF_INPUT) {
+			prepare_args->in_map_entries[j].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->in_map_entries[j++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_in_map_entries++;
+		} else {
+			prepare_args->out_map_entries[k].resource_handle =
+				io_cfg_ptr[i].resource_type;
+			prepare_args->out_map_entries[k++].sync_id =
+				io_cfg_ptr[i].fence;
+			prepare_args->num_out_map_entries++;
+		}
+		CAM_DBG(CAM_JPEG, "dir[%d]: %u, fence: %u",
+			i, io_cfg_ptr[i].direction, io_cfg_ptr[i].fence);
+	}
+
+	j = prepare_args->num_hw_update_entries;
+	rc = cam_packet_util_get_kmd_buffer(packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "get kmd buf failed %d", rc);
+		return rc;
+	}
+	/* fill kmd buf info into 1st hw update entry */
+	prepare_args->hw_update_entries[j].len =
+		(uint32_t)kmd_buf.used_bytes;
+	prepare_args->hw_update_entries[j].handle =
+		(uint32_t)kmd_buf.handle;
+	prepare_args->hw_update_entries[j].offset =
+		(uint32_t)kmd_buf.offset;
+	j++;
+
+	for (i = 0; i < packet->num_cmd_buf;  i++, j++) {
+		prepare_args->hw_update_entries[j].len =
+			(uint32_t)cmd_desc[i].length;
+		prepare_args->hw_update_entries[j].handle =
+			(uint32_t)cmd_desc[i].mem_handle;
+		prepare_args->hw_update_entries[j].offset =
+			(uint32_t)cmd_desc[i].offset;
+	}
+	prepare_args->num_hw_update_entries = j;
+	prepare_args->priv = (void *)(uintptr_t)packet->header.request_id;
+
+	CAM_DBG(CAM_JPEG, "will wait on input sync sync_id %d",
+		prepare_args->in_map_entries[0].sync_id);
+
+	return rc;
+}
+
+static void cam_jpeg_mgr_stop_deinit_dev(struct cam_jpeg_hw_mgr *hw_mgr,
+	struct cam_jpeg_hw_cfg_req *p_cfg_req, uint32_t dev_type)
+{
+	int rc = 0;
+	struct cam_jpeg_set_irq_cb irq_cb;
+
+	/* Unregister the IRQ callback, then stop and deinit the device */
+	irq_cb.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb;
+	irq_cb.data = NULL;
+	irq_cb.b_set_cb = false;
+	if (hw_mgr->devices[dev_type][0]->hw_ops.process_cmd) {
+		rc = hw_mgr->devices[dev_type][0]->hw_ops.process_cmd(
+			hw_mgr->devices[dev_type][0]->hw_priv,
+			CAM_JPEG_CMD_SET_IRQ_CB,
+			&irq_cb, sizeof(irq_cb));
+		if (rc)
+			CAM_ERR(CAM_JPEG, "SET_IRQ_CB fail %d", rc);
+	} else {
+		CAM_ERR(CAM_JPEG, "process_cmd null %d", dev_type);
+	}
+
+	if (hw_mgr->devices[dev_type][0]->hw_ops.stop) {
+		rc = hw_mgr->devices[dev_type][0]->hw_ops.stop(
+			hw_mgr->devices[dev_type][0]->hw_priv,
+			NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "stop fail %d", rc);
+	} else {
+		CAM_ERR(CAM_JPEG, "op stop null %d", dev_type);
+	}
+
+	if (hw_mgr->devices[dev_type][0]->hw_ops.deinit) {
+		rc = hw_mgr->devices[dev_type][0]->hw_ops.deinit(
+			hw_mgr->devices[dev_type][0]->hw_priv,
+			NULL, 0);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Failed to Deinit %d HW %d",
+				dev_type, rc);
+	} else {
+		CAM_ERR(CAM_JPEG, "op deinit null %d", dev_type);
+	}
+
+	hw_mgr->device_in_use[dev_type][0] = false;
+	hw_mgr->dev_hw_cfg_args[dev_type][0] = NULL;
+}
+
+static int cam_jpeg_mgr_flush(void *hw_mgr_priv,
+	struct cam_jpeg_hw_ctx_data *ctx_data)
+{
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	uint32_t dev_type;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	struct cam_jpeg_hw_cfg_req *cfg_req = NULL, *req_temp = NULL;
+
+	CAM_DBG(CAM_JPEG, "E: JPEG flush ctx");
+
+	if (!hw_mgr || !ctx_data) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+	if (hw_mgr->device_in_use[dev_type][0] && p_cfg_req) {
+		if ((struct cam_jpeg_hw_ctx_data *)
+			p_cfg_req->hw_cfg_args.ctxt_to_hw_map == ctx_data) {
+			cam_jpeg_mgr_stop_deinit_dev(hw_mgr, p_cfg_req,
+				dev_type);
+			list_del_init(&p_cfg_req->list);
+			list_add_tail(&p_cfg_req->list,
+				&hw_mgr->free_req_list);
+		}
+	}
+
+	list_for_each_entry_safe(cfg_req, req_temp,
+		&hw_mgr->hw_config_req_list, list) {
+		if ((struct cam_jpeg_hw_ctx_data *)
+			cfg_req->hw_cfg_args.ctxt_to_hw_map != ctx_data)
+			continue;
+
+		list_del_init(&cfg_req->list);
+		list_add_tail(&cfg_req->list, &hw_mgr->free_req_list);
+	}
+
+	CAM_DBG(CAM_JPEG, "X: JPEG flush ctx");
+
+	return 0;
+}
+
+static int cam_jpeg_mgr_flush_req(void *hw_mgr_priv,
+	struct cam_jpeg_hw_ctx_data *ctx_data,
+	struct cam_hw_flush_args *flush_args)
+{
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_cfg_req *cfg_req = NULL;
+	struct cam_jpeg_hw_cfg_req *req_temp = NULL;
+	long request_id = 0;
+	uint32_t dev_type;
+	struct cam_jpeg_hw_cfg_req *p_cfg_req = NULL;
+	bool b_req_found = false;
+
+	CAM_DBG(CAM_JPEG, "E: JPEG flush req");
+
+	if (!hw_mgr || !ctx_data || !flush_args) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	if (flush_args->num_req_pending)
+		return 0;
+
+	if (!flush_args->num_req_active)
+		return 0;
+
+	request_id = (uintptr_t)flush_args->flush_req_active[0];
+
+	if (request_id <= 0) {
+		CAM_ERR(CAM_JPEG, "Invalid req id %ld", request_id);
+		return -EINVAL;
+	}
+
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	p_cfg_req = hw_mgr->dev_hw_cfg_args[dev_type][0];
+	if (hw_mgr->device_in_use[dev_type][0] && p_cfg_req) {
+		if (((struct cam_jpeg_hw_ctx_data *)
+			p_cfg_req->hw_cfg_args.ctxt_to_hw_map == ctx_data) &&
+			(p_cfg_req->req_id == request_id)) {
+			cam_jpeg_mgr_stop_deinit_dev(hw_mgr, p_cfg_req,
+				dev_type);
+			list_del_init(&p_cfg_req->list);
+			list_add_tail(&p_cfg_req->list,
+				&hw_mgr->free_req_list);
+			b_req_found = true;
+		}
+	}
+
+	list_for_each_entry_safe(cfg_req, req_temp,
+		&hw_mgr->hw_config_req_list, list) {
+		if ((struct cam_jpeg_hw_ctx_data *)
+			cfg_req->hw_cfg_args.ctxt_to_hw_map != ctx_data)
+			continue;
+
+		if (cfg_req->req_id != request_id)
+			continue;
+
+		list_del_init(&cfg_req->list);
+		list_add_tail(&cfg_req->list, &hw_mgr->free_req_list);
+		b_req_found = true;
+		break;
+	}
+
+	if (!b_req_found) {
+		CAM_ERR(CAM_JPEG, "req not found %ld", request_id);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_JPEG, "X: JPEG flush req");
+	return 0;
+}
+
+static int cam_jpeg_mgr_hw_flush(void *hw_mgr_priv, void *flush_hw_args)
+{
+	int rc = 0;
+	struct cam_hw_flush_args *flush_args = flush_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+
+	if (!hw_mgr || !flush_args || !flush_args->ctxt_to_hw_map) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)flush_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	if ((flush_args->flush_type >= CAM_FLUSH_TYPE_MAX) ||
+		(flush_args->flush_type < CAM_FLUSH_TYPE_REQ)) {
+		CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+			flush_args->flush_type);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
+		if (rc)
+			CAM_ERR(CAM_JPEG, "Flush failed %d", rc);
+		break;
+	case CAM_FLUSH_TYPE_REQ:
+		rc = cam_jpeg_mgr_flush_req(hw_mgr_priv, ctx_data, flush_args);
+		break;
+	default:
+		CAM_ERR(CAM_JPEG, "Invalid flush type: %d",
+			flush_args->flush_type);
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_hw_stop(void *hw_mgr_priv, void *stop_hw_args)
+{
+	int rc;
+	struct cam_hw_stop_args *stop_args =
+		(struct cam_hw_stop_args *)stop_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+
+	if (!hw_mgr || !stop_args || !stop_args->ctxt_to_hw_map) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)stop_args->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+
+	rc = cam_jpeg_mgr_flush(hw_mgr_priv, ctx_data);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "flush failed %d", rc);
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_release_hw(void *hw_mgr_priv, void *release_hw_args)
+{
+	int rc;
+	struct cam_hw_release_args *release_hw = release_hw_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	uint32_t dev_type;
+
+	if (!hw_mgr || !release_hw || !release_hw->ctxt_to_hw_map) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_data = (struct cam_jpeg_hw_ctx_data *)release_hw->ctxt_to_hw_map;
+	if (!ctx_data->in_use) {
+		CAM_ERR(CAM_JPEG, "ctx is not in use");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EINVAL;
+	}
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 0) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_JPEG, "Error Unbalanced deinit");
+		return -EFAULT;
+	}
+
+	hw_mgr->cdm_info[dev_type][0].ref_cnt--;
+	if (!(hw_mgr->cdm_info[dev_type][0].ref_cnt)) {
+		if (cam_cdm_stream_off(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "CDM stream off failed %d",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+		}
+		/* release cdm handle */
+		cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+	}
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	rc = cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "JPEG release ctx failed");
+		CAM_ERR(CAM_JPEG, "JPEG release ctx failed");
+		kfree(ctx_data->cdm_cmd);
+		ctx_data->cdm_cmd = NULL;
+
+		return -EINVAL;
+	}
+
+	kfree(ctx_data->cdm_cmd);
+	ctx_data->cdm_cmd = NULL;
+	CAM_DBG(CAM_JPEG, "handle %pK", ctx_data);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_acquire_hw(void *hw_mgr_priv, void *acquire_hw_args)
+{
+	int rc = 0;
+	int32_t ctx_id = 0;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_jpeg_hw_ctx_data *ctx_data = NULL;
+	struct cam_hw_acquire_args *args = acquire_hw_args;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	struct cam_cdm_acquire_data cdm_acquire;
+	uint32_t dev_type;
+
+	if (!hw_mgr_priv || !acquire_hw_args) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK", hw_mgr_priv,
+			acquire_hw_args);
+		return -EINVAL;
+	}
+
+	if (args->num_acq > 1) {
+		CAM_ERR(CAM_JPEG,
+			"number of resources are wrong: %u",
+			args->num_acq);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&jpeg_dev_acquire_info,
+			(void __user *)args->acquire_info,
+			sizeof(jpeg_dev_acquire_info))) {
+		CAM_ERR(CAM_JPEG, "copy failed");
+		return -EFAULT;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	ctx_id = cam_jpeg_mgr_get_free_ctx(hw_mgr);
+	if (ctx_id >= CAM_JPEG_CTX_MAX) {
+		CAM_ERR(CAM_JPEG, "No free ctx space in hw_mgr");
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		return -EFAULT;
+	}
+
+	ctx_data = &hw_mgr->ctx_data[ctx_id];
+
+	ctx_data->cdm_cmd =
+		kzalloc(((sizeof(struct cam_cdm_bl_request)) +
+			((CAM_JPEG_HW_ENTRIES_MAX - 1) *
+			sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!ctx_data->cdm_cmd) {
+		rc = -ENOMEM;
+		goto jpeg_release_ctx;
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->jpeg_dev_acquire_info = jpeg_dev_acquire_info;
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	if (ctx_data->jpeg_dev_acquire_info.dev_type >=
+		CAM_JPEG_RES_TYPE_MAX) {
+		rc = -EINVAL;
+		goto acq_cdm_hdl_failed;
+	}
+	dev_type = ctx_data->jpeg_dev_acquire_info.dev_type;
+	if (!hw_mgr->cdm_info[dev_type][0].ref_cnt) {
+		if (dev_type == CAM_JPEG_RES_TYPE_ENC) {
+			memcpy(cdm_acquire.identifier,
+				"jpegenc", sizeof("jpegenc"));
+		} else {
+			memcpy(cdm_acquire.identifier,
+				"jpegdma", sizeof("jpegdma"));
+		}
+		cdm_acquire.cell_index = 0;
+		cdm_acquire.handle = 0;
+		cdm_acquire.userdata = ctx_data;
+		if (hw_mgr->cdm_reg_map[dev_type][0]) {
+			cdm_acquire.base_array[0] =
+				hw_mgr->cdm_reg_map[dev_type][0];
+		}
+		cdm_acquire.base_array_cnt = 1;
+		cdm_acquire.id = CAM_CDM_VIRTUAL;
+		cdm_acquire.cam_cdm_callback = NULL;
+
+		rc = cam_cdm_acquire(&cdm_acquire);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "Failed to acquire the CDM HW %d",
+				rc);
+			rc = -EFAULT;
+			goto acq_cdm_hdl_failed;
+		}
+		hw_mgr->cdm_info[dev_type][0].cdm_handle = cdm_acquire.handle;
+		hw_mgr->cdm_info[dev_type][0].cdm_ops = cdm_acquire.ops;
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	} else {
+		hw_mgr->cdm_info[dev_type][0].ref_cnt++;
+	}
+
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1) {
+		if (cam_cdm_stream_on(
+			hw_mgr->cdm_info[dev_type][0].cdm_handle)) {
+			CAM_ERR(CAM_JPEG, "Cannot start CDM (%d)!",
+				hw_mgr->cdm_info[dev_type][0].cdm_handle);
+			rc = -EFAULT;
+			goto start_cdm_hdl_failed;
+		}
+	}
+
+	mutex_lock(&ctx_data->ctx_mutex);
+	ctx_data->context_priv = args->context_data;
+
+	args->ctxt_to_hw_map = (void *)&(hw_mgr->ctx_data[ctx_id]);
+
+	mutex_unlock(&ctx_data->ctx_mutex);
+
+	hw_mgr->ctx_data[ctx_id].ctxt_event_cb = args->event_cb;
+
+	if (copy_to_user((void __user *)args->acquire_info,
+		&jpeg_dev_acquire_info,
+		sizeof(jpeg_dev_acquire_info))) {
+		rc = -EFAULT;
+		goto copy_to_user_failed;
+	}
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_JPEG, "success ctx_data= %pK", ctx_data);
+
+	return rc;
+
+copy_to_user_failed:
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+		cam_cdm_stream_off(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+start_cdm_hdl_failed:
+	if (hw_mgr->cdm_info[dev_type][0].ref_cnt == 1)
+		cam_cdm_release(hw_mgr->cdm_info[dev_type][0].cdm_handle);
+	hw_mgr->cdm_info[dev_type][0].ref_cnt--;
+acq_cdm_hdl_failed:
+	kfree(ctx_data->cdm_cmd);
+jpeg_release_ctx:
+	cam_jpeg_mgr_release_ctx(hw_mgr, ctx_data);
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_get_hw_caps(void *hw_mgr_priv, void *hw_caps_args)
+{
+	int rc;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *query_cap = hw_caps_args;
+
+	if (!hw_mgr_priv || !hw_caps_args) {
+		CAM_ERR(CAM_JPEG, "Invalid params: %pK %pK",
+			hw_mgr_priv, hw_caps_args);
+		return -EINVAL;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+
+	if (copy_to_user(u64_to_user_ptr(query_cap->caps_handle),
+		&g_jpeg_hw_mgr.jpeg_caps,
+		sizeof(struct cam_jpeg_query_cap_cmd))) {
+		CAM_ERR(CAM_JPEG, "copy_to_user failed");
+		rc = -EFAULT;
+		goto copy_error;
+	}
+	CAM_DBG(CAM_JPEG, "Success");
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return 0;
+
+copy_error:
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+	return rc;
+}
+
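+/*
+ * Creates the frame (non-IRQ) and IRQ-context work queues, attaches a
+ * payload to every task in each pool, and seeds free_req_list with the
+ * static pool of CAM_JPEG_HW_CFG_Q_MAX config-request entries.
+ */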
+static int cam_jpeg_setup_workqs(void)
+{
+	int rc, i;
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_command_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_frame,
+		CRM_WORKQ_USAGE_NON_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_frame_failed;
+	}
+
+	rc = cam_req_mgr_workq_create(
+		"jpeg_message_queue",
+		CAM_JPEG_WORKQ_NUM_TASK,
+		&g_jpeg_hw_mgr.work_process_irq_cb,
+		CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "unable to create a worker %d", rc);
+		goto work_process_irq_cb_failed;
+	}
+
+	g_jpeg_hw_mgr.process_frame_work_data =
+		kzalloc(sizeof(struct cam_jpeg_process_frame_work_data_t) *
+			CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_frame_work_data) {
+		rc = -ENOMEM;
+		goto work_process_frame_data_failed;
+	}
+
+	g_jpeg_hw_mgr.process_irq_cb_work_data =
+		kzalloc(sizeof(struct cam_jpeg_process_irq_work_data_t) *
+			CAM_JPEG_WORKQ_NUM_TASK, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.process_irq_cb_work_data) {
+		rc = -ENOMEM;
+		goto work_process_irq_cb_data_failed;
+	}
+
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_irq_cb->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_irq_cb_work_data[i];
+
+	for (i = 0; i < CAM_JPEG_WORKQ_NUM_TASK; i++)
+		g_jpeg_hw_mgr.work_process_frame->task.pool[i].payload =
+			&g_jpeg_hw_mgr.process_frame_work_data[i];
+
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.hw_config_req_list);
+	INIT_LIST_HEAD(&g_jpeg_hw_mgr.free_req_list);
+	for (i = 0; i < CAM_JPEG_HW_CFG_Q_MAX; i++) {
+		INIT_LIST_HEAD(&(g_jpeg_hw_mgr.req_list[i].list));
+		list_add_tail(&(g_jpeg_hw_mgr.req_list[i].list),
+			&(g_jpeg_hw_mgr.free_req_list));
+	}
+
+	return rc;
+
+work_process_irq_cb_data_failed:
+	kfree(g_jpeg_hw_mgr.process_frame_work_data);
+work_process_frame_data_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_irq_cb);
+work_process_irq_cb_failed:
+	cam_req_mgr_workq_destroy(&g_jpeg_hw_mgr.work_process_frame);
+work_process_frame_failed:
+
+	return rc;
+}
+
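+/*
+ * Walks the "compat-hw-name" entries in the DT node, resolves each child
+ * platform device, records its cam_hw_intf by type and index, and caches
+ * the encoder/DMA register maps later used for CDM change-base
+ * programming.
+ */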
+static int cam_jpeg_init_devices(struct device_node *of_node,
+	uint32_t *p_num_enc_dev,
+	uint32_t *p_num_dma_dev)
+{
+	int count, i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	const char *name = NULL;
+	struct device_node *child_node = NULL;
+	struct platform_device *child_pdev = NULL;
+	struct cam_hw_intf *child_dev_intf = NULL;
+	struct cam_hw_info *enc_hw = NULL;
+	struct cam_hw_info *dma_hw = NULL;
+	struct cam_hw_soc_info *enc_soc_info = NULL;
+	struct cam_hw_soc_info *dma_soc_info = NULL;
+
+	if (!p_num_enc_dev || !p_num_dma_dev) {
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+	count = of_property_count_strings(of_node, "compat-hw-name");
+	if (count <= 0) {
+		CAM_ERR(CAM_JPEG,
+			"no compat hw found in dev tree, count = %d",
+			count);
+		rc = -EINVAL;
+		goto num_dev_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-enc", &num_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "read num enc devices failed %d", rc);
+		goto num_enc_failed;
+	}
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dev, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_JPEG, "getting number of dma dev nodes failed");
+		goto num_enc_failed;
+	}
+
+	rc = of_property_read_u32(of_node, "num-jpeg-dma", &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "get num dma dev nodes failed %d", rc);
+		goto num_dma_failed;
+	}
+
+	g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA] = kzalloc(
+		sizeof(struct cam_hw_intf *) * num_dma_dev, GFP_KERNEL);
+	if (!g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]) {
+		rc = -ENOMEM;
+		goto num_dma_failed;
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "compat-hw-name",
+			i, &name);
+		if (rc) {
+			CAM_ERR(CAM_JPEG, "getting dev object name failed");
+			goto compat_hw_name_failed;
+		}
+
+		child_node = of_find_node_by_name(NULL, name);
+		if (!child_node) {
+			CAM_ERR(CAM_JPEG,
+				"error! Cannot find node in dtsi %s", name);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+
+		child_pdev = of_find_device_by_node(child_node);
+		if (!child_pdev) {
+			CAM_ERR(CAM_JPEG, "failed to find device on bus %s",
+				child_node->name);
+			rc = -ENODEV;
+			of_node_put(child_node);
+			goto compat_hw_name_failed;
+		}
+
+		child_dev_intf = (struct cam_hw_intf *)platform_get_drvdata(
+			child_pdev);
+		if (!child_dev_intf) {
+			CAM_ERR(CAM_JPEG, "no child device");
+			of_node_put(child_node);
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		CAM_DBG(CAM_JPEG, "child_intf %pK type %d id %d",
+			child_dev_intf,
+			child_dev_intf->hw_type,
+			child_dev_intf->hw_idx);
+
+		if ((child_dev_intf->hw_type == CAM_JPEG_DEV_ENC &&
+			child_dev_intf->hw_idx >= num_dev) ||
+			(child_dev_intf->hw_type == CAM_JPEG_DEV_DMA &&
+			child_dev_intf->hw_idx >= num_dma_dev)) {
+			CAM_ERR(CAM_JPEG, "index out of range");
+			rc = -ENODEV;
+			goto compat_hw_name_failed;
+		}
+		g_jpeg_hw_mgr.devices[child_dev_intf->hw_type]
+			[child_dev_intf->hw_idx] = child_dev_intf;
+
+		of_node_put(child_node);
+	}
+
+	enc_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC][0]->hw_priv;
+	enc_soc_info = &enc_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_ENC][0] =
+		&enc_soc_info->reg_map[0];
+	dma_hw = (struct cam_hw_info *)
+		g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA][0]->hw_priv;
+	dma_soc_info = &dma_hw->soc_info;
+	g_jpeg_hw_mgr.cdm_reg_map[CAM_JPEG_DEV_DMA][0] =
+		&dma_soc_info->reg_map[0];
+
+	*p_num_enc_dev = num_dev;
+	*p_num_dma_dev = num_dma_dev;
+
+	return rc;
+
+compat_hw_name_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_DMA]);
+num_dma_failed:
+	kfree(g_jpeg_hw_mgr.devices[CAM_JPEG_DEV_ENC]);
+num_enc_failed:
+num_dev_failed:
+
+	return rc;
+}
+
+static int cam_jpeg_mgr_cmd(void *hw_mgr_priv, void *cmd_args)
+{
+	int rc = 0;
+	struct cam_hw_cmd_args *hw_cmd_args = cmd_args;
+	struct cam_jpeg_hw_mgr *hw_mgr = hw_mgr_priv;
+
+	if (!hw_mgr_priv || !cmd_args) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	switch (hw_cmd_args->cmd_type) {
+	case CAM_HW_MGR_CMD_DUMP_PF_INFO:
+		cam_jpeg_mgr_print_io_bufs(
+			hw_cmd_args->u.pf_args.pf_data.packet,
+			hw_mgr->iommu_hdl,
+			hw_mgr->iommu_sec_hdl,
+			hw_cmd_args->u.pf_args.buf_info,
+			hw_cmd_args->u.pf_args.mem_found);
+		break;
+	default:
+		CAM_ERR(CAM_JPEG, "Invalid cmd");
+	}
+
+	return rc;
+}
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node, uint64_t *hw_mgr_hdl,
+	int *iommu_hdl)
+{
+	int i, rc;
+	uint32_t num_dev;
+	uint32_t num_dma_dev;
+	struct cam_hw_mgr_intf *hw_mgr_intf;
+	struct cam_iommu_handle cdm_handles;
+
+	hw_mgr_intf = (struct cam_hw_mgr_intf *)hw_mgr_hdl;
+	if (!of_node || !hw_mgr_intf) {
+		CAM_ERR(CAM_JPEG, "Invalid args of_node %pK hw_mgr %pK",
+			of_node, hw_mgr_intf);
+		return -EINVAL;
+	}
+
+	memset(hw_mgr_hdl, 0x0, sizeof(struct cam_hw_mgr_intf));
+	hw_mgr_intf->hw_mgr_priv = &g_jpeg_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_jpeg_mgr_get_hw_caps;
+	hw_mgr_intf->hw_acquire = cam_jpeg_mgr_acquire_hw;
+	hw_mgr_intf->hw_release = cam_jpeg_mgr_release_hw;
+	hw_mgr_intf->hw_prepare_update = cam_jpeg_mgr_prepare_hw_update;
+	hw_mgr_intf->hw_config = cam_jpeg_mgr_config_hw;
+	hw_mgr_intf->hw_flush = cam_jpeg_mgr_hw_flush;
+	hw_mgr_intf->hw_stop = cam_jpeg_mgr_hw_stop;
+	hw_mgr_intf->hw_cmd = cam_jpeg_mgr_cmd;
+
+	mutex_init(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_jpeg_hw_mgr.hw_mgr_lock);
+
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_init(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	rc = cam_jpeg_init_devices(of_node, &num_dev, &num_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg init devices %d", rc);
+		goto smmu_get_failed;
+	}
+
+	rc = cam_smmu_get_handle("jpeg", &g_jpeg_hw_mgr.iommu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg get iommu handle failed %d", rc);
+		goto smmu_get_failed;
+	}
+
+	CAM_DBG(CAM_JPEG, "mmu handle :%d", g_jpeg_hw_mgr.iommu_hdl);
+	rc = cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "jpeg attach failed: %d", rc);
+		goto jpeg_attach_failed;
+	}
+
+	rc = cam_cdm_get_iommu_handle("jpegenc", &cdm_handles);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "acquire cdm iommu handle Fail  %d", rc);
+		g_jpeg_hw_mgr.cdm_iommu_hdl = -1;
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure = -1;
+		goto cdm_iommu_failed;
+	}
+	g_jpeg_hw_mgr.cdm_iommu_hdl = cdm_handles.non_secure;
+	g_jpeg_hw_mgr.cdm_iommu_hdl_secure = cdm_handles.secure;
+
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.dev_iommu_handle.secure =
+		g_jpeg_hw_mgr.iommu_sec_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.non_secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl;
+	g_jpeg_hw_mgr.jpeg_caps.cdm_iommu_handle.secure =
+		g_jpeg_hw_mgr.cdm_iommu_hdl_secure;
+	g_jpeg_hw_mgr.jpeg_caps.num_enc = num_dev;
+	g_jpeg_hw_mgr.jpeg_caps.num_dma = num_dma_dev;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.incr  = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_ENC].hw_ver.reserved = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.major = 4;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.minor = 2;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.incr  = 0;
+	g_jpeg_hw_mgr.jpeg_caps.dev_ver[CAM_JPEG_DEV_DMA].hw_ver.reserved = 0;
+
+	rc = cam_jpeg_setup_workqs();
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "setup work qs failed  %d", rc);
+		goto cdm_iommu_failed;
+	}
+
+	if (iommu_hdl)
+		*iommu_hdl = g_jpeg_hw_mgr.iommu_hdl;
+
+	return rc;
+
+cdm_iommu_failed:
+	cam_smmu_ops(g_jpeg_hw_mgr.iommu_hdl, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(g_jpeg_hw_mgr.iommu_hdl);
+jpeg_attach_failed:
+	g_jpeg_hw_mgr.iommu_hdl = 0;
+smmu_get_failed:
+	mutex_destroy(&g_jpeg_hw_mgr.hw_mgr_mutex);
+	for (i = 0; i < CAM_JPEG_CTX_MAX; i++)
+		mutex_destroy(&g_jpeg_hw_mgr.ctx_data[i].ctx_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
new file mode 100644
index 0000000..05492ad
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/cam_jpeg_hw_mgr.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_H
+#define CAM_JPEG_HW_MGR_H
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <media/cam_jpeg.h>
+
+#include "cam_jpeg_hw_intf.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_hw_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_mem_mgr.h"
+
+#define CAM_JPEG_WORKQ_NUM_TASK      30
+#define CAM_JPEG_WORKQ_TASK_CMD_TYPE 1
+#define CAM_JPEG_WORKQ_TASK_MSG_TYPE 2
+#define CAM_JPEG_HW_CFG_Q_MAX        50
+
+/**
+ * struct cam_jpeg_process_frame_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to command data
+ * @request_id: Request id
+ */
+struct cam_jpeg_process_frame_work_data_t {
+	uint32_t type;
+	void *data;
+	uintptr_t request_id;
+};
+
+/**
+ * struct cam_jpeg_process_irq_work_data_t
+ *
+ * @type: Task type
+ * @data: Pointer to message data
+ * @result_size: Result size of enc/dma
+ * @irq_status: IRQ status
+ */
+struct cam_jpeg_process_irq_work_data_t {
+	uint32_t type;
+	void *data;
+	int32_t result_size;
+	uint32_t irq_status;
+};
+
+/**
+ * struct cam_jpeg_hw_cdm_info_t
+ *
+ * @ref_cnt: Ref count of how many times device type is acquired
+ * @cdm_handle: Cdm handle
+ * @cdm_ops: Cdm ops struct
+ */
+struct cam_jpeg_hw_cdm_info_t {
+	int ref_cnt;
+	uint32_t cdm_handle;
+	struct cam_cdm_utils_ops *cdm_ops;
+};
+
+/**
+ * struct cam_jpeg_hw_cfg_req
+ *
+ * @list: List entry
+ * @hw_cfg_args: Hw config args
+ * @dev_type: Dev type for cfg request
+ * @req_id: Request Id
+ */
+struct cam_jpeg_hw_cfg_req {
+	struct list_head list;
+	struct cam_hw_config_args hw_cfg_args;
+	uint32_t dev_type;
+	uintptr_t req_id;
+};
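+
+/*
+ * Illustrative lifecycle of a cfg request node (a sketch, not the
+ * manager's exact code): nodes are taken from free_req_list, filled,
+ * queued on hw_config_req_list, and returned to the free list once the
+ * frame completes:
+ *
+ *	req = list_first_entry(&hw_mgr->free_req_list,
+ *		struct cam_jpeg_hw_cfg_req, list);
+ *	list_del_init(&req->list);
+ *	req->hw_cfg_args = *config_args;
+ *	list_add_tail(&req->list, &hw_mgr->hw_config_req_list);
+ */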
+
+/**
+ * struct cam_jpeg_hw_ctx_data
+ *
+ * @context_priv: Context private data, cam_context from
+ *     acquire.
+ * @ctx_mutex: Mutex for context
+ * @jpeg_dev_acquire_info: Acquire device info
+ * @ctxt_event_cb: Context callback function
+ * @in_use: Flag for context usage
+ * @wait_complete: Completion info
+ * @cdm_cmd: Cdm cmd submitted for that context.
+ */
+struct cam_jpeg_hw_ctx_data {
+	void *context_priv;
+	struct mutex ctx_mutex;
+	struct cam_jpeg_acquire_dev_info jpeg_dev_acquire_info;
+	cam_hw_event_cb_func ctxt_event_cb;
+	bool in_use;
+	struct completion wait_complete;
+	struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_jpeg_hw_mgr
+ * @hw_mgr_mutex: Mutex for JPEG hardware manager
+ * @hw_mgr_lock: Spinlock for JPEG hardware manager
+ * @ctx_data: Context data
+ * @jpeg_caps: JPEG capabilities
+ * @iommu_hdl: Non secure IOMMU handle
+ * @iommu_sec_hdl: Secure IOMMU handle
+ * @work_process_frame: Work queue for hw config requests
+ * @work_process_irq_cb: Work queue for processing IRQs.
+ * @process_frame_work_data: Work data pool for hw config
+ *     requests
+ * @process_irq_cb_work_data: Work data pool for irq requests
+ * @cdm_iommu_hdl: Iommu handle received from cdm
+ * @cdm_iommu_hdl_secure: Secure iommu handle received from cdm
+ * @devices: Core hw Devices of JPEG hardware manager
+ * @cdm_info: Cdm info for each core device.
+ * @cdm_reg_map: Regmap of each device for cdm.
+ * @device_in_use: Flag device being used for an active request
+ * @dev_hw_cfg_args: Current cfg request per core dev
+ * @hw_config_req_list: Pending hw update requests list
+ * @free_req_list: Free nodes for above list
+ * @req_list: Nodes of hw update list
+ */
+struct cam_jpeg_hw_mgr {
+	struct mutex hw_mgr_mutex;
+	spinlock_t hw_mgr_lock;
+	struct cam_jpeg_hw_ctx_data ctx_data[CAM_JPEG_CTX_MAX];
+	struct cam_jpeg_query_cap_cmd jpeg_caps;
+	int32_t iommu_hdl;
+	int32_t iommu_sec_hdl;
+	struct cam_req_mgr_core_workq *work_process_frame;
+	struct cam_req_mgr_core_workq *work_process_irq_cb;
+	struct cam_jpeg_process_frame_work_data_t *process_frame_work_data;
+	struct cam_jpeg_process_irq_work_data_t *process_irq_cb_work_data;
+	int cdm_iommu_hdl;
+	int cdm_iommu_hdl_secure;
+
+	struct cam_hw_intf **devices[CAM_JPEG_DEV_TYPE_MAX];
+	struct cam_jpeg_hw_cdm_info_t cdm_info[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_soc_reg_map *cdm_reg_map[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	uint32_t device_in_use[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+	struct cam_jpeg_hw_cfg_req *dev_hw_cfg_args[CAM_JPEG_DEV_TYPE_MAX]
+		[CAM_JPEG_NUM_DEV_PER_RES_MAX];
+
+	struct list_head hw_config_req_list;
+	struct list_head free_req_list;
+	struct cam_jpeg_hw_cfg_req req_list[CAM_JPEG_HW_CFG_Q_MAX];
+};
+
+#endif /* CAM_JPEG_HW_MGR_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
new file mode 100644
index 0000000..2f3a47f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_intf.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_JPEG_HW_INTF_H
+#define CAM_JPEG_HW_INTF_H
+
+#include "cam_cpas_api.h"
+
+#define CAM_JPEG_CTX_MAX              8
+#define CAM_JPEG_DEV_PER_TYPE_MAX     1
+
+#define CAM_JPEG_CMD_BUF_MAX_SIZE     128
+#define CAM_JPEG_MSG_BUF_MAX_SIZE     CAM_JPEG_CMD_BUF_MAX_SIZE
+
+#define JPEG_VOTE                     640000000
+
+enum cam_jpeg_hw_type {
+	CAM_JPEG_DEV_ENC,
+	CAM_JPEG_DEV_DMA,
+};
+
+struct cam_jpeg_set_irq_cb {
+	int32_t (*jpeg_hw_mgr_cb)(uint32_t irq_status,
+		int32_t result_size, void *data);
+	void *data;
+	uint32_t b_set_cb;
+};
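+
+/*
+ * Example registration (illustrative only; the callback name is
+ * hypothetical and the real wiring lives in the hw manager): the
+ * manager installs its IRQ callback through process_cmd():
+ *
+ *	struct cam_jpeg_set_irq_cb irq_cb = {
+ *		.jpeg_hw_mgr_cb = cam_jpeg_hw_mgr_cb,
+ *		.data = hw_mgr,
+ *		.b_set_cb = 1,
+ *	};
+ *	dev_intf->hw_ops.process_cmd(dev_intf->hw_priv,
+ *		CAM_JPEG_CMD_SET_IRQ_CB, &irq_cb, sizeof(irq_cb));
+ */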
+
+enum cam_jpeg_cmd_type {
+	CAM_JPEG_CMD_CDM_CFG,
+	CAM_JPEG_CMD_SET_IRQ_CB,
+	CAM_JPEG_CMD_MAX,
+};
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
new file mode 100644
index 0000000..a05097a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include/cam_jpeg_hw_mgr_intf.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_JPEG_HW_MGR_INTF_H
+#define CAM_JPEG_HW_MGR_INTF_H
+
+#include <uapi/media/cam_jpeg.h>
+#include <uapi/media/cam_defs.h>
+#include <linux/of.h>
+
+int cam_jpeg_hw_mgr_init(struct device_node *of_node,
+	uint64_t *hw_mgr_hdl, int *iommu_hdl);
+
+#endif /* CAM_JPEG_HW_MGR_INTF_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
new file mode 100644
index 0000000..0748910
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_dma_dev.o jpeg_dma_core.o jpeg_dma_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
new file mode 100644
index 0000000..26ce434
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_dma_dev->soc_info;
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core_info->core_mutex);
+	if (++core_info->ref_count > 1) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = JPEG_VOTE;
+	axi_vote.uncompressed_bw = JPEG_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc);
+		goto cpas_failed;
+	}
+
+	rc = cam_jpeg_dma_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
+		goto soc_failed;
+	}
+
+	mutex_unlock(&core_info->core_mutex);
+
+	return 0;
+
+soc_failed:
+	cam_cpas_stop(core_info->cpas_handle);
+cpas_failed:
+	--core_info->ref_count;
+	mutex_unlock(&core_info->core_mutex);
+
+	return rc;
+}
+
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_dma_dev->soc_info;
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core_info->core_mutex);
+	if (--core_info->ref_count > 0) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	if (core_info->ref_count < 0) {
+		CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count);
+		core_info->ref_count = 0;
+		mutex_unlock(&core_info->core_mutex);
+		return -EFAULT;
+	}
+
+	rc = cam_jpeg_dma_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc enable failed %d", rc);
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+	mutex_unlock(&core_info->core_mutex);
+
+	return 0;
+}
+
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_dma_dev = device_priv;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_JPEG_CMD_MAX) {
+		CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+
+	switch (cmd_type) {
+	case CAM_JPEG_CMD_SET_IRQ_CB:
+	{
+		struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_JPEG, "cmd args NULL");
+			return -EINVAL;
+		}
+		if (irq_cb->b_set_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb =
+				irq_cb->jpeg_hw_mgr_cb;
+			core_info->irq_cb.data = irq_cb->data;
+		} else {
+			core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+			core_info->irq_cb.data = NULL;
+		}
+		rc = 0;
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data)
+{
+	/* No JPEG DMA interrupt handling is implemented yet. */
+	return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
new file mode 100644
index 0000000..d721fd8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_core.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_JPEG_DMA_CORE_H
+#define CAM_JPEG_DMA_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+#include "cam_jpeg_hw_intf.h"
+
+struct cam_jpeg_dma_device_hw_info {
+	uint32_t reserved;
+};
+
+enum cam_jpeg_dma_core_state {
+	CAM_JPEG_DMA_CORE_NOT_READY,
+	CAM_JPEG_DMA_CORE_READY,
+	CAM_JPEG_DMA_CORE_RESETTING,
+	CAM_JPEG_DMA_CORE_STATE_MAX,
+};
+
+struct cam_jpeg_dma_device_core_info {
+	enum cam_jpeg_dma_core_state core_state;
+	struct cam_jpeg_dma_device_hw_info *jpeg_dma_hw_info;
+	uint32_t cpas_handle;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	int32_t ref_count;
+	struct mutex core_mutex;
+};
+
+int cam_jpeg_dma_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_dma_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_dma_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_DMA_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
new file mode 100644
index 0000000..abe0c5a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_dev.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_dma_core.h"
+#include "jpeg_dma_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_jpeg_dma_device_hw_info cam_jpeg_dma_hw_info = {
+	.reserved = 0,
+};
+
+static int cam_jpeg_dma_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_jpeg_dma_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = soc_info->dev;
+	memcpy(cpas_register_params.identifier, "jpeg-dma",
+		sizeof("jpeg-dma"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+static int cam_jpeg_dma_unregister_cpas(
+	struct cam_jpeg_dma_device_core_info *core_info)
+{
+	int rc;
+
+	rc = cam_cpas_unregister_client(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+	core_info->cpas_handle = 0;
+
+	return rc;
+}
+
+static int cam_jpeg_dma_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_dma_dev = NULL;
+	struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	int rc;
+
+	jpeg_dma_dev_intf = platform_get_drvdata(pdev);
+	if (!jpeg_dma_dev_intf) {
+		CAM_ERR(CAM_JPEG, "error No data in pdev");
+		return -EINVAL;
+	}
+
+	jpeg_dma_dev = jpeg_dma_dev_intf->hw_priv;
+	if (!jpeg_dma_dev) {
+		CAM_ERR(CAM_JPEG, "error HW data is NULL");
+		rc = -ENODEV;
+		goto free_jpeg_hw_intf;
+	}
+
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_JPEG, "error core data NULL");
+		goto deinit_soc;
+	}
+
+	rc = cam_jpeg_dma_unregister_cpas(core_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);
+
+	mutex_destroy(&core_info->core_mutex);
+	kfree(core_info);
+
+deinit_soc:
+	rc = cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&jpeg_dma_dev->hw_mutex);
+	kfree(jpeg_dma_dev);
+
+free_jpeg_hw_intf:
+	kfree(jpeg_dma_dev_intf);
+	return rc;
+}
+
+static int cam_jpeg_dma_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_dma_dev = NULL;
+	struct cam_hw_intf *jpeg_dma_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_jpeg_dma_device_core_info *core_info = NULL;
+	struct cam_jpeg_dma_device_hw_info *hw_info = NULL;
+	int rc;
+
+	jpeg_dma_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!jpeg_dma_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &jpeg_dma_dev_intf->hw_idx);
+
+	jpeg_dma_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!jpeg_dma_dev) {
+		rc = -ENOMEM;
+		goto error_alloc_dev;
+	}
+	jpeg_dma_dev->soc_info.pdev = pdev;
+	jpeg_dma_dev->soc_info.dev = &pdev->dev;
+	jpeg_dma_dev->soc_info.dev_name = pdev->name;
+	jpeg_dma_dev_intf->hw_priv = jpeg_dma_dev;
+	jpeg_dma_dev_intf->hw_ops.init = cam_jpeg_dma_init_hw;
+	jpeg_dma_dev_intf->hw_ops.deinit = cam_jpeg_dma_deinit_hw;
+	jpeg_dma_dev_intf->hw_ops.process_cmd = cam_jpeg_dma_process_cmd;
+	jpeg_dma_dev_intf->hw_type = CAM_JPEG_DEV_DMA;
+
+	platform_set_drvdata(pdev, jpeg_dma_dev_intf);
+	jpeg_dma_dev->core_info =
+		kzalloc(sizeof(struct cam_jpeg_dma_device_core_info),
+			GFP_KERNEL);
+	if (!jpeg_dma_dev->core_info) {
+		rc = -ENOMEM;
+		goto error_alloc_core;
+	}
+	core_info = (struct cam_jpeg_dma_device_core_info *)
+		jpeg_dma_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_JPEG, " No jpeg_dma hardware info");
+		rc = -EINVAL;
+		goto error_match_dev;
+	}
+	hw_info = (struct cam_jpeg_dma_device_hw_info *)match_dev->data;
+	core_info->jpeg_dma_hw_info = hw_info;
+	core_info->core_state = CAM_JPEG_DMA_CORE_NOT_READY;
+	mutex_init(&core_info->core_mutex);
+
+	rc = cam_jpeg_dma_init_soc_resources(&jpeg_dma_dev->soc_info,
+		cam_jpeg_dma_irq,
+		jpeg_dma_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "failed to init_soc %d", rc);
+		goto error_init_soc;
+	}
+
+	rc = cam_jpeg_dma_register_cpas(&jpeg_dma_dev->soc_info,
+		core_info, jpeg_dma_dev_intf->hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc);
+		goto error_reg_cpas;
+	}
+	jpeg_dma_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&jpeg_dma_dev->hw_mutex);
+	spin_lock_init(&jpeg_dma_dev->hw_lock);
+	init_completion(&jpeg_dma_dev->hw_complete);
+
+	CAM_DBG(CAM_JPEG, " hwidx %d", jpeg_dma_dev_intf->hw_idx);
+
+	return rc;
+
+error_reg_cpas:
+	cam_soc_util_release_platform_resource(&jpeg_dma_dev->soc_info);
+error_init_soc:
+	mutex_destroy(&core_info->core_mutex);
+error_match_dev:
+	kfree(jpeg_dma_dev->core_info);
+error_alloc_core:
+	kfree(jpeg_dma_dev);
+error_alloc_dev:
+	kfree(jpeg_dma_dev_intf);
+	return rc;
+}
+
+static const struct of_device_id cam_jpeg_dma_dt_match[] = {
+	{
+		.compatible = "qcom,cam_jpeg_dma",
+		.data = &cam_jpeg_dma_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_dma_dt_match);
+
+static struct platform_driver cam_jpeg_dma_driver = {
+	.probe = cam_jpeg_dma_probe,
+	.remove = cam_jpeg_dma_remove,
+	.driver = {
+		.name = "cam-jpeg-dma",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_dma_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_jpeg_dma_init_module(void)
+{
+	return platform_driver_register(&cam_jpeg_dma_driver);
+}
+
+static void __exit cam_jpeg_dma_exit_module(void)
+{
+	platform_driver_unregister(&cam_jpeg_dma_driver);
+}
+
+module_init(cam_jpeg_dma_init_module);
+module_exit(cam_jpeg_dma_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_DMA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
new file mode 100644
index 0000000..3a78b47
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_dma_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_dma_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		jpeg_dma_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
new file mode 100644
index 0000000..cab2108
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_dma_hw/jpeg_dma_soc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_JPEG_DMA_SOC_H_
+#define _CAM_JPEG_DMA_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_jpeg_dma_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_dma_irq_handler, void *irq_data);
+
+int cam_jpeg_dma_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_dma_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_DMA_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
new file mode 100644
index 0000000..63ceb95
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_hw_mgr/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw
+
+obj-$(CONFIG_SPECTRA_CAMERA) += jpeg_enc_dev.o jpeg_enc_core.o jpeg_enc_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
new file mode 100644
index 0000000..94b2e0e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/cam_jpeg_enc_hw_info_ver_4_2_0.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_JPEG_ENC_HW_INFO_TITAN170_H
+#define CAM_JPEG_ENC_HW_INFO_TITAN170_H
+
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK 0x00000001
+#define CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_SHIFT 0x00000000
+
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK 0x10000000
+#define CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_SHIFT 0x0000001c
+
+#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK 0x8000000
+#define CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_SHIFT 0x0000001b
+
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_MASK 0x00000800
+#define CAM_JPEG_HW_IRQ_STATUS_BUS_ERROR_SHIFT 0x0000000b
+
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF      (0x1<<19)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR     (0x1<<20)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR   (0x1<<21)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF (0x1<<22)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW    (0x1<<23)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM       (0x1<<24)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ   (0x1<<25)
+#define CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM       (0x1<<26)
+#define CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK        (0x1<<29)
+
+#define CAM_JPEG_HW_MASK_COMP_FRAMEDONE \
+		CAM_JPEG_HW_IRQ_STATUS_FRAMEDONE_MASK
+#define CAM_JPEG_HW_MASK_COMP_RESET_ACK \
+		CAM_JPEG_HW_IRQ_STATUS_RESET_ACK_MASK
+#define CAM_JPEG_HW_MASK_COMP_ERR \
+		(CAM_JPEG_HW_IRQ_STATUS_DCD_UNESCAPED_FF | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_HUFFMAN_ERROR | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_COEFFICIENT_ERR | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_BIT_STUFF | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_SCAN_UNDERFLOW | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_INVALID_RSM_SEQ | \
+		CAM_JPEG_HW_IRQ_STATUS_DCD_MISSING_RSM | \
+		CAM_JPEG_HW_IRQ_STATUS_VIOLATION_MASK)
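+
+/*
+ * Decode sketch (illustrative only): the enc core tests a raw status
+ * word against the composite masks above, roughly as
+ *
+ *	if (irq_status & CAM_JPEG_HW_MASK_COMP_ERR)
+ *		report the failure via the hw manager callback;
+ *	else if (irq_status & CAM_JPEG_HW_MASK_COMP_FRAMEDONE)
+ *		read the encoded size and complete the frame;
+ */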
+
+static struct cam_jpeg_enc_device_hw_info cam_jpeg_enc_hw_info = {
+	.reg_offset = {
+		.hw_version = 0x0,
+		.int_clr = 0x1c,
+		.int_status = 0x20,
+		.int_mask = 0x18,
+		.hw_cmd = 0x10,
+		.reset_cmd = 0x8,
+		.encode_size = 0x180,
+	},
+	.reg_val = {
+		.int_clr_clearall = 0xFFFFFFFF,
+		.int_mask_disable_all = 0x00000000,
+		.int_mask_enable_all = 0xFFFFFFFF,
+		.hw_cmd_start = 0x00000001,
+		.reset_cmd = 0x00032093,
+		.hw_cmd_stop = 0x00000002,
+	},
+	.int_status = {
+		.framedone = CAM_JPEG_HW_MASK_COMP_FRAMEDONE,
+		.resetdone = CAM_JPEG_HW_MASK_COMP_RESET_ACK,
+		.iserror = CAM_JPEG_HW_MASK_COMP_ERR,
+		.stopdone = CAM_JPEG_HW_IRQ_STATUS_STOP_DONE_MASK,
+	}
+};
+
+#endif /* CAM_JPEG_ENC_HW_INFO_TITAN170_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
new file mode 100644
index 0000000..b01a07a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/videodev2.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+
+#include "cam_io_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+
+#define CAM_JPEG_HW_IRQ_IS_FRAME_DONE(jpeg_irq_status, hi) \
+	((jpeg_irq_status) & (hi)->int_status.framedone)
+#define CAM_JPEG_HW_IRQ_IS_RESET_ACK(jpeg_irq_status, hi) \
+	((jpeg_irq_status) & (hi)->int_status.resetdone)
+#define CAM_JPEG_HW_IRQ_IS_ERR(jpeg_irq_status, hi) \
+	((jpeg_irq_status) & (hi)->int_status.iserror)
+#define CAM_JPEG_HW_IRQ_IS_STOP_DONE(jpeg_irq_status, hi) \
+	((jpeg_irq_status) & (hi)->int_status.stopdone)
+
+#define CAM_JPEG_ENC_RESET_TIMEOUT msecs_to_jiffies(500)
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core_info->core_mutex);
+	if (++core_info->ref_count > 1) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = JPEG_VOTE;
+	axi_vote.uncompressed_bw = JPEG_VOTE;
+
+	rc = cam_cpas_start(core_info->cpas_handle,
+		&ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpass start failed: %d", rc);
+		goto cpas_failed;
+	}
+
+	rc = cam_jpeg_enc_enable_soc_resources(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "soc enable is failed %d", rc);
+		goto soc_failed;
+	}
+
+	mutex_unlock(&core_info->core_mutex);
+
+	return 0;
+
+soc_failed:
+	cam_cpas_stop(core_info->cpas_handle);
+cpas_failed:
+	--core_info->ref_count;
+	mutex_unlock(&core_info->core_mutex);
+
+	return rc;
+}
+
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid cam_dev_info");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	if (!soc_info || !core_info) {
+		CAM_ERR(CAM_JPEG, "soc_info = %pK core_info = %pK",
+			soc_info, core_info);
+		return -EINVAL;
+	}
+
+	mutex_lock(&core_info->core_mutex);
+	if (--core_info->ref_count > 0) {
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	if (core_info->ref_count < 0) {
+		CAM_ERR(CAM_JPEG, "ref cnt %d", core_info->ref_count);
+		core_info->ref_count = 0;
+		mutex_unlock(&core_info->core_mutex);
+		return -EFAULT;
+	}
+
+	rc = cam_jpeg_enc_disable_soc_resources(soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "soc disable failed %d", rc);
+
+	rc = cam_cpas_stop(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas stop failed: %d", rc);
+
+	mutex_unlock(&core_info->core_mutex);
+
+	return 0;
+}
+
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	uint32_t irq_status = 0;
+	uint32_t encoded_size = 0;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return IRQ_HANDLED;
+	}
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	irq_status = cam_io_r_mb(mem_base +
+		core_info->jpeg_enc_hw_info->reg_offset.int_status);
+
+	cam_io_w_mb(irq_status,
+		soc_info->reg_map[0].mem_base +
+		core_info->jpeg_enc_hw_info->reg_offset.int_clr);
+
+	CAM_DBG(CAM_JPEG, "irq_num %d  irq_status = %x , core_state %d",
+		irq_num, irq_status, core_info->core_state);
+
+	if (CAM_JPEG_HW_IRQ_IS_FRAME_DONE(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_READY) {
+			encoded_size = cam_io_r_mb(mem_base +
+			core_info->jpeg_enc_hw_info->reg_offset.encode_size);
+			if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+				core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+					encoded_size,
+					core_info->irq_cb.data);
+			} else {
+				CAM_ERR(CAM_JPEG, "unexpected done, no cb");
+			}
+		} else {
+			CAM_ERR(CAM_JPEG, "unexpected done irq");
+		}
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+	}
+	if (CAM_JPEG_HW_IRQ_IS_RESET_ACK(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+			core_info->core_state = CAM_JPEG_ENC_CORE_READY;
+			complete(&jpeg_enc_dev->hw_complete);
+		} else {
+			CAM_ERR(CAM_JPEG, "unexpected reset irq");
+		}
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+	}
+	if (CAM_JPEG_HW_IRQ_IS_STOP_DONE(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
+		if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) {
+			core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+			complete(&jpeg_enc_dev->hw_complete);
+			if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+				core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+					-1,
+					core_info->irq_cb.data);
+			}
+		} else {
+			CAM_ERR(CAM_JPEG, "unexpected abort irq");
+		}
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+	}
+	/* Unexpected/unintended HW interrupt */
+	if (CAM_JPEG_HW_IRQ_IS_ERR(irq_status, hw_info)) {
+		spin_lock(&jpeg_enc_dev->hw_lock);
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+		CAM_ERR_RATE_LIMIT(CAM_JPEG,
+			"error irq_num %d  irq_status = %x , core_state %d",
+			irq_num, irq_status, core_info->core_state);
+
+		if (core_info->irq_cb.jpeg_hw_mgr_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb(irq_status,
+				-1,
+				core_info->irq_cb.data);
+		}
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_jpeg_enc_reset_hw(void *data,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+	unsigned long rem_jiffies;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	/* Sequence: mask all IRQs, clear pending, re-enable, then reset. */
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	mutex_lock(&core_info->core_mutex);
+	spin_lock(&jpeg_enc_dev->hw_lock);
+	if (core_info->core_state == CAM_JPEG_ENC_CORE_RESETTING) {
+		CAM_ERR(CAM_JPEG, "alrady resetting");
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	reinit_completion(&jpeg_enc_dev->hw_complete);
+	core_info->core_state = CAM_JPEG_ENC_CORE_RESETTING;
+	spin_unlock(&jpeg_enc_dev->hw_lock);
+
+	cam_io_w_mb(hw_info->reg_val.int_mask_disable_all,
+		mem_base + hw_info->reg_offset.int_mask);
+	cam_io_w_mb(hw_info->reg_val.int_clr_clearall,
+		mem_base + hw_info->reg_offset.int_clr);
+	cam_io_w_mb(hw_info->reg_val.int_mask_enable_all,
+		mem_base + hw_info->reg_offset.int_mask);
+	cam_io_w_mb(hw_info->reg_val.reset_cmd,
+		mem_base + hw_info->reg_offset.reset_cmd);
+
+	rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+		CAM_JPEG_ENC_RESET_TIMEOUT);
+	if (!rem_jiffies) {
+		CAM_ERR(CAM_JPEG, "error Reset Timeout");
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	}
+
+	mutex_unlock(&core_info->core_mutex);
+	return 0;
+}
+
+int cam_jpeg_enc_start_hw(void *data,
+	void *start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	if (core_info->core_state != CAM_JPEG_ENC_CORE_READY) {
+		CAM_ERR(CAM_JPEG, "Error not ready");
+		return -EINVAL;
+	}
+
+	cam_io_w_mb(hw_info->reg_val.hw_cmd_start,
+		mem_base + hw_info->reg_offset.hw_cmd);
+
+	return 0;
+}
+
+int cam_jpeg_enc_stop_hw(void *data,
+	void *stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = data;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	void __iomem *mem_base;
+	unsigned long rem_jiffies;
+
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "Invalid args");
+		return -EINVAL;
+	}
+	soc_info = &jpeg_enc_dev->soc_info;
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	hw_info = core_info->jpeg_enc_hw_info;
+	mem_base = soc_info->reg_map[0].mem_base;
+
+	mutex_lock(&core_info->core_mutex);
+	spin_lock(&jpeg_enc_dev->hw_lock);
+	if (core_info->core_state == CAM_JPEG_ENC_CORE_ABORTING) {
+		CAM_ERR(CAM_JPEG, "alrady stopping");
+		spin_unlock(&jpeg_enc_dev->hw_lock);
+		mutex_unlock(&core_info->core_mutex);
+		return 0;
+	}
+
+	reinit_completion(&jpeg_enc_dev->hw_complete);
+	core_info->core_state = CAM_JPEG_ENC_CORE_ABORTING;
+	spin_unlock(&jpeg_enc_dev->hw_lock);
+
+	cam_io_w_mb(hw_info->reg_val.hw_cmd_stop,
+		mem_base + hw_info->reg_offset.hw_cmd);
+
+	rem_jiffies = wait_for_completion_timeout(&jpeg_enc_dev->hw_complete,
+		CAM_JPEG_ENC_RESET_TIMEOUT);
+	if (!rem_jiffies) {
+		CAM_ERR(CAM_JPEG, "error Reset Timeout");
+		core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	}
+
+	mutex_unlock(&core_info->core_mutex);
+	return 0;
+}
+
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *jpeg_enc_dev = device_priv;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	if (!device_priv) {
+		CAM_ERR(CAM_JPEG, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd_type >= CAM_JPEG_CMD_MAX) {
+		CAM_ERR(CAM_JPEG, "Invalid command : %x", cmd_type);
+		return -EINVAL;
+	}
+
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+
+	switch (cmd_type) {
+	case CAM_JPEG_CMD_SET_IRQ_CB:
+	{
+		struct cam_jpeg_set_irq_cb *irq_cb = cmd_args;
+
+		if (!cmd_args) {
+			CAM_ERR(CAM_JPEG, "cmd args NULL");
+			return -EINVAL;
+		}
+		if (irq_cb->b_set_cb) {
+			core_info->irq_cb.jpeg_hw_mgr_cb =
+				irq_cb->jpeg_hw_mgr_cb;
+			core_info->irq_cb.data = irq_cb->data;
+		} else {
+			core_info->irq_cb.jpeg_hw_mgr_cb = NULL;
+			core_info->irq_cb.data = NULL;
+		}
+		rc = 0;
+		break;
+	}
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	if (rc)
+		CAM_ERR(CAM_JPEG, "error cmdtype %d rc = %d", cmd_type, rc);
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
new file mode 100644
index 0000000..d4262e0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_core.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef CAM_JPEG_ENC_CORE_H
+#define CAM_JPEG_ENC_CORE_H
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+
+#include "cam_jpeg_hw_intf.h"
+
+struct cam_jpeg_enc_reg_offsets {
+	uint32_t hw_version;
+	uint32_t int_status;
+	uint32_t int_clr;
+	uint32_t int_mask;
+	uint32_t hw_cmd;
+	uint32_t reset_cmd;
+	uint32_t encode_size;
+};
+
+struct cam_jpeg_enc_regval {
+	uint32_t int_clr_clearall;
+	uint32_t int_mask_disable_all;
+	uint32_t int_mask_enable_all;
+	uint32_t hw_cmd_start;
+	uint32_t reset_cmd;
+	uint32_t hw_cmd_stop;
+};
+
+struct cam_jpeg_enc_int_status {
+	uint32_t framedone;
+	uint32_t resetdone;
+	uint32_t iserror;
+	uint32_t stopdone;
+};
+
+struct cam_jpeg_enc_device_hw_info {
+	struct cam_jpeg_enc_reg_offsets reg_offset;
+	struct cam_jpeg_enc_regval reg_val;
+	struct cam_jpeg_enc_int_status int_status;
+};
+
+enum cam_jpeg_enc_core_state {
+	CAM_JPEG_ENC_CORE_NOT_READY,
+	CAM_JPEG_ENC_CORE_READY,
+	CAM_JPEG_ENC_CORE_RESETTING,
+	CAM_JPEG_ENC_CORE_ABORTING,
+	CAM_JPEG_ENC_CORE_STATE_MAX,
+};
+
+struct cam_jpeg_enc_device_core_info {
+	enum cam_jpeg_enc_core_state core_state;
+	struct cam_jpeg_enc_device_hw_info *jpeg_enc_hw_info;
+	uint32_t cpas_handle;
+	struct cam_jpeg_set_irq_cb irq_cb;
+	int32_t ref_count;
+	struct mutex core_mutex;
+};
+
+int cam_jpeg_enc_init_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_deinit_hw(void *device_priv,
+	void *init_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_start_hw(void *device_priv,
+	void *start_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_stop_hw(void *device_priv,
+	void *stop_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_reset_hw(void *device_priv,
+	void *reset_hw_args, uint32_t arg_size);
+int cam_jpeg_enc_process_cmd(void *device_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+irqreturn_t cam_jpeg_enc_irq(int irq_num, void *data);
+
+#endif /* CAM_JPEG_ENC_CORE_H */
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
new file mode 100644
index 0000000..4fc9009
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_dev.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_device.h>
+#include <linux/timer.h>
+
+#include "jpeg_enc_core.h"
+#include "jpeg_enc_soc.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_io_util.h"
+#include "cam_jpeg_hw_intf.h"
+#include "cam_jpeg_hw_mgr_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_jpeg_enc_hw_info_ver_4_2_0.h"
+
+static int cam_jpeg_enc_register_cpas(struct cam_hw_soc_info *soc_info,
+	struct cam_jpeg_enc_device_core_info *core_info,
+	uint32_t hw_idx)
+{
+	struct cam_cpas_register_params cpas_register_params;
+	int rc;
+
+	cpas_register_params.dev = soc_info->dev;
+	memcpy(cpas_register_params.identifier, "jpeg-enc",
+		sizeof("jpeg-enc"));
+	cpas_register_params.cam_cpas_client_cb = NULL;
+	cpas_register_params.cell_index = hw_idx;
+	cpas_register_params.userdata = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_params);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, "cpas_register failed: %d", rc);
+		return rc;
+	}
+	core_info->cpas_handle = cpas_register_params.client_handle;
+
+	return rc;
+}
+
+static int cam_jpeg_enc_unregister_cpas(
+	struct cam_jpeg_enc_device_core_info *core_info)
+{
+	int rc;
+
+	rc = cam_cpas_unregister_client(core_info->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "cpas unregister failed: %d", rc);
+	core_info->cpas_handle = 0;
+
+	return rc;
+}
+
+static int cam_jpeg_enc_remove(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_enc_dev = NULL;
+	struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	int rc;
+
+	jpeg_enc_dev_intf = platform_get_drvdata(pdev);
+	if (!jpeg_enc_dev_intf) {
+		CAM_ERR(CAM_JPEG, "error No data in pdev");
+		return -EINVAL;
+	}
+
+	jpeg_enc_dev = jpeg_enc_dev_intf->hw_priv;
+	if (!jpeg_enc_dev) {
+		CAM_ERR(CAM_JPEG, "error HW data is NULL");
+		rc = -ENODEV;
+		goto free_jpeg_hw_intf;
+	}
+
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+	if (!core_info) {
+		CAM_ERR(CAM_JPEG, "error core data NULL");
+		goto deinit_soc;
+	}
+
+	rc = cam_jpeg_enc_unregister_cpas(core_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, " unreg failed to reg cpas %d", rc);
+
+	mutex_destroy(&core_info->core_mutex);
+	kfree(core_info);
+
+deinit_soc:
+	rc = cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "Failed to deinit soc rc=%d", rc);
+
+	mutex_destroy(&jpeg_enc_dev->hw_mutex);
+	kfree(jpeg_enc_dev);
+
+free_jpeg_hw_intf:
+	kfree(jpeg_enc_dev_intf);
+	return rc;
+}
+
+static int cam_jpeg_enc_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *jpeg_enc_dev = NULL;
+	struct cam_hw_intf *jpeg_enc_dev_intf = NULL;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_jpeg_enc_device_core_info *core_info = NULL;
+	struct cam_jpeg_enc_device_hw_info *hw_info = NULL;
+	int rc;
+
+	jpeg_enc_dev_intf = kzalloc(sizeof(struct cam_hw_intf), GFP_KERNEL);
+	if (!jpeg_enc_dev_intf)
+		return -ENOMEM;
+
+	of_property_read_u32(pdev->dev.of_node,
+		"cell-index", &jpeg_enc_dev_intf->hw_idx);
+
+	jpeg_enc_dev = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!jpeg_enc_dev) {
+		rc = -ENOMEM;
+		goto error_alloc_dev;
+	}
+	jpeg_enc_dev->soc_info.pdev = pdev;
+	jpeg_enc_dev->soc_info.dev = &pdev->dev;
+	jpeg_enc_dev->soc_info.dev_name = pdev->name;
+	jpeg_enc_dev_intf->hw_priv = jpeg_enc_dev;
+	jpeg_enc_dev_intf->hw_ops.init = cam_jpeg_enc_init_hw;
+	jpeg_enc_dev_intf->hw_ops.deinit = cam_jpeg_enc_deinit_hw;
+	jpeg_enc_dev_intf->hw_ops.start = cam_jpeg_enc_start_hw;
+	jpeg_enc_dev_intf->hw_ops.stop = cam_jpeg_enc_stop_hw;
+	jpeg_enc_dev_intf->hw_ops.reset = cam_jpeg_enc_reset_hw;
+	jpeg_enc_dev_intf->hw_ops.process_cmd = cam_jpeg_enc_process_cmd;
+	jpeg_enc_dev_intf->hw_type = CAM_JPEG_DEV_ENC;
+
+	platform_set_drvdata(pdev, jpeg_enc_dev_intf);
+	jpeg_enc_dev->core_info =
+		kzalloc(sizeof(struct cam_jpeg_enc_device_core_info),
+			GFP_KERNEL);
+	if (!jpeg_enc_dev->core_info) {
+		rc = -ENOMEM;
+		goto error_alloc_core;
+	}
+	core_info = (struct cam_jpeg_enc_device_core_info *)
+		jpeg_enc_dev->core_info;
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev) {
+		CAM_ERR(CAM_JPEG, " No jpeg_enc hardware info");
+		rc = -EINVAL;
+		goto error_match_dev;
+	}
+	hw_info = (struct cam_jpeg_enc_device_hw_info *)match_dev->data;
+	core_info->jpeg_enc_hw_info = hw_info;
+	core_info->core_state = CAM_JPEG_ENC_CORE_NOT_READY;
+	mutex_init(&core_info->core_mutex);
+
+	rc = cam_jpeg_enc_init_soc_resources(&jpeg_enc_dev->soc_info,
+		cam_jpeg_enc_irq,
+		jpeg_enc_dev);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, " failed to init_soc %d", rc);
+		goto error_init_soc;
+	}
+
+	rc = cam_jpeg_enc_register_cpas(&jpeg_enc_dev->soc_info,
+		core_info, jpeg_enc_dev_intf->hw_idx);
+	if (rc) {
+		CAM_ERR(CAM_JPEG, " failed to reg cpas %d", rc);
+		goto error_reg_cpas;
+	}
+	jpeg_enc_dev->hw_state = CAM_HW_STATE_POWER_DOWN;
+	mutex_init(&jpeg_enc_dev->hw_mutex);
+	spin_lock_init(&jpeg_enc_dev->hw_lock);
+	init_completion(&jpeg_enc_dev->hw_complete);
+
+	return rc;
+
+error_reg_cpas:
+	cam_soc_util_release_platform_resource(&jpeg_enc_dev->soc_info);
+error_init_soc:
+	mutex_destroy(&core_info->core_mutex);
+error_match_dev:
+	kfree(jpeg_enc_dev->core_info);
+error_alloc_core:
+	kfree(jpeg_enc_dev);
+error_alloc_dev:
+	kfree(jpeg_enc_dev_intf);
+
+	return rc;
+}
+
+static const struct of_device_id cam_jpeg_enc_dt_match[] = {
+	{
+		.compatible = "qcom,cam_jpeg_enc",
+		.data = &cam_jpeg_enc_hw_info,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_jpeg_enc_dt_match);
+
+static struct platform_driver cam_jpeg_enc_driver = {
+	.probe = cam_jpeg_enc_probe,
+	.remove = cam_jpeg_enc_remove,
+	.driver = {
+		.name = "cam-jpeg-enc",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_jpeg_enc_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_jpeg_enc_init_module(void)
+{
+	return platform_driver_register(&cam_jpeg_enc_driver);
+}
+
+static void __exit cam_jpeg_enc_exit_module(void)
+{
+	platform_driver_unregister(&cam_jpeg_enc_driver);
+}
+
+module_init(cam_jpeg_enc_init_module);
+module_exit(cam_jpeg_enc_exit_module);
+MODULE_DESCRIPTION("CAM JPEG_ENC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
new file mode 100644
index 0000000..ef1a80a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/dma-buf.h>
+#include <media/cam_defs.h>
+#include <media/cam_jpeg.h>
+
+#include "jpeg_enc_soc.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_enc_irq_handler, void *irq_data)
+{
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		jpeg_enc_irq_handler,
+		irq_data);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "init soc failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "enable platform failed %d", rc);
+
+	return rc;
+}
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info)
+{
+	int rc;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc)
+		CAM_ERR(CAM_JPEG, "disable platform failed %d", rc);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
new file mode 100644
index 0000000..9653893
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_jpeg/jpeg_hw/jpeg_enc_hw/jpeg_enc_soc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_JPEG_ENC_SOC_H_
+#define _CAM_JPEG_ENC_SOC_H_
+
+#include "cam_soc_util.h"
+
+int cam_jpeg_enc_init_soc_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t jpeg_enc_irq_handler, void *irq_data);
+
+int cam_jpeg_enc_enable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+int cam_jpeg_enc_disable_soc_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_JPEG_ENC_SOC_H_ */
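
The three helpers above are the encoder's whole SOC-resource lifecycle: init once at
probe time (DT parse plus IRQ/clock/regulator request), then enable/disable around each
power cycle. A minimal usage sketch follows; the wrapper function is hypothetical, only
the cam_jpeg_enc_*_soc_resources calls are from this patch.

	/* Illustrative only: how the init/deinit hw_ops would bracket core work. */
	static int example_jpeg_enc_power_cycle(struct cam_hw_info *jpeg_enc_dev)
	{
		struct cam_hw_soc_info *soc_info = &jpeg_enc_dev->soc_info;
		int rc;

		/* clocks and regulators on, voted at the SVS corner */
		rc = cam_jpeg_enc_enable_soc_resources(soc_info);
		if (rc)
			return rc;

		/* ... program and run the encoder core here ... */

		/* clocks and regulators off again */
		return cam_jpeg_enc_disable_soc_resources(soc_info);
	}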
diff --git a/drivers/media/platform/msm/camera/cam_lrme/Makefile b/drivers/media/platform/msm/camera/cam_lrme/Makefile
new file mode 100644
index 0000000..44636ec
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_dev.o cam_lrme_context.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
new file mode 100644
index 0000000..cc69859
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_debug_util.h"
+#include "cam_lrme_context.h"
+
+static const char lrme_dev_name[] = "lrme";
+
+static int __cam_lrme_ctx_acquire_dev_in_available(struct cam_context *ctx,
+	struct cam_acquire_dev_cmd *cmd)
+{
+	int rc = 0;
+	uintptr_t ctxt_to_hw_map = (uintptr_t)ctx->ctxt_to_hw_map;
+	struct cam_lrme_context *lrme_ctx = ctx->ctx_priv;
+
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+	rc = cam_context_acquire_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire");
+		return rc;
+	}
+
+	ctxt_to_hw_map |= (lrme_ctx->index << CAM_LRME_CTX_INDEX_SHIFT);
+	ctx->ctxt_to_hw_map = (void *)ctxt_to_hw_map;
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
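
The OR above packs this context's index into the upper bits of the cookie returned by
the HW manager (CAM_LRME_CTX_INDEX_SHIFT is 32, so this assumes a 64-bit kernel). A
worked example of the resulting layout, using the decode macros from
cam_lrme_hw_mgr.h and hypothetical values:

	uintptr_t map = 0x1;	/* device index 1, set by the hw mgr at acquire */

	map |= ((uintptr_t)2 << CAM_LRME_CTX_INDEX_SHIFT);	/* ctx index 2, set here */

	/* CAM_LRME_DECODE_DEVICE_INDEX(map) == 1   (map & 0xF)          */
	/* CAM_LRME_DECODE_PRIORITY(map)     == 0   ((map & 0xF0) >> 4)  */
	/* CAM_LRME_DECODE_CTX_INDEX(map)    == 2   (map >> 32)          */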
+
+static int __cam_lrme_ctx_release_dev_in_acquired(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to release");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_start_dev_in_acquired(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+	rc = cam_context_start_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to start");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACTIVATED;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_config_dev_in_activated(struct cam_context *ctx,
+	struct cam_config_dev_cmd *cmd)
+{
+	int rc;
+
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+	rc = cam_context_prepare_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to config");
+		return rc;
+	}
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_flush_dev_in_activated(struct cam_context *ctx,
+	struct cam_flush_dev_cmd *cmd)
+{
+	int rc;
+
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+	rc = cam_context_flush_dev_to_hw(ctx, cmd);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed to flush device");
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_stop_dev_in_activated(struct cam_context *ctx,
+	struct cam_start_stop_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+	rc = cam_context_stop_dev_to_hw(ctx);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to stop dev");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_ACQUIRED;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_release_dev_in_activated(struct cam_context *ctx,
+	struct cam_release_dev_cmd *cmd)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter ctx %d", ctx->ctx_id);
+
+	rc = __cam_lrme_ctx_stop_dev_in_activated(ctx, NULL);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to stop");
+		return rc;
+	}
+
+	rc = cam_context_release_dev_to_hw(ctx, cmd);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to release");
+		return rc;
+	}
+
+	ctx->state = CAM_CTX_AVAILABLE;
+
+	return rc;
+}
+
+static int __cam_lrme_ctx_handle_irq_in_activated(void *context,
+	uint32_t evt_id, void *evt_data)
+{
+	int rc;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	rc = cam_context_buf_done_from_hw(context, evt_data, evt_id);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in buf done, rc=%d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+/* top state machine */
+static struct cam_ctx_ops
+	cam_lrme_ctx_state_machine[CAM_CTX_STATE_MAX] = {
+	/* Uninit */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Available */
+	{
+		.ioctl_ops = {
+			.acquire_dev = __cam_lrme_ctx_acquire_dev_in_available,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Acquired */
+	{
+		.ioctl_ops = {
+			.config_dev = __cam_lrme_ctx_config_dev_in_activated,
+			.release_dev = __cam_lrme_ctx_release_dev_in_acquired,
+			.start_dev = __cam_lrme_ctx_start_dev_in_acquired,
+		},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Ready */
+	{
+		.ioctl_ops = {},
+		.crm_ops = {},
+		.irq_ops = NULL,
+	},
+	/* Activate */
+	{
+		.ioctl_ops = {
+			.config_dev = __cam_lrme_ctx_config_dev_in_activated,
+			.release_dev = __cam_lrme_ctx_release_dev_in_activated,
+			.stop_dev = __cam_lrme_ctx_stop_dev_in_activated,
+			.flush_dev = __cam_lrme_ctx_flush_dev_in_activated,
+		},
+		.crm_ops = {},
+		.irq_ops = __cam_lrme_ctx_handle_irq_in_activated,
+	},
+};
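
The table is indexed by ctx->state, so an ioctl only does anything in a state that
registered a handler for it: acquire_dev exists only in Available, start_dev only in
Acquired, and so on. A sketch of the dispatch done by the generic cam_context layer
(field names assume the common cam_context definition; the real dispatcher lives in
cam_context.c, not in this file):

	static int example_dispatch_start(struct cam_context *ctx,
		struct cam_start_stop_dev_cmd *cmd)
	{
		struct cam_ctx_ops *ops = &ctx->state_machine[ctx->state];

		if (!ops->ioctl_ops.start_dev)
			return -EINVAL;	/* e.g. start before acquire */

		return ops->ioctl_ops.start_dev(ctx, cmd);
	}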
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+	struct cam_context *base_ctx,
+	struct cam_hw_mgr_intf *hw_intf,
+	uint32_t index)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	if (!base_ctx || !lrme_ctx) {
+		CAM_ERR(CAM_LRME, "Invalid input");
+		return -EINVAL;
+	}
+
+	memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+
+	rc = cam_context_init(base_ctx, lrme_dev_name, CAM_LRME, index,
+		NULL, hw_intf, lrme_ctx->req_base, CAM_CTX_REQ_MAX);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to init context");
+		return rc;
+	}
+	lrme_ctx->base = base_ctx;
+	lrme_ctx->index = index;
+	base_ctx->ctx_priv = lrme_ctx;
+	base_ctx->state_machine = cam_lrme_ctx_state_machine;
+
+	return rc;
+}
+
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx)
+{
+	int rc = 0;
+
+	CAM_DBG(CAM_LRME, "Enter");
+
+	if (!lrme_ctx) {
+		CAM_ERR(CAM_LRME, "No ctx to deinit");
+		return -EINVAL;
+	}
+
+	rc = cam_context_deinit(lrme_ctx->base);
+
+	memset(lrme_ctx, 0, sizeof(*lrme_ctx));
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
new file mode 100644
index 0000000..f4072c3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_context.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_LRME_CONTEXT_H_
+#define _CAM_LRME_CONTEXT_H_
+
+#include "cam_context.h"
+#include "cam_context_utils.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_sync_api.h"
+
+#define CAM_LRME_CTX_INDEX_SHIFT 32
+
+/**
+ * struct cam_lrme_context
+ *
+ * @base      : Base context pointer for this LRME context
+ * @req_base  : List of base requests for this LRME context
+ * @index     : Index of this LRME context
+ */
+struct cam_lrme_context {
+	struct cam_context         *base;
+	struct cam_ctx_request      req_base[CAM_CTX_REQ_MAX];
+	uint64_t                    index;
+};
+
+int cam_lrme_context_init(struct cam_lrme_context *lrme_ctx,
+	struct cam_context *base_ctx, struct cam_hw_mgr_intf *hw_intf,
+	uint32_t index);
+int cam_lrme_context_deinit(struct cam_lrme_context *lrme_ctx);
+
+#endif /* _CAM_LRME_CONTEXT_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
new file mode 100644
index 0000000..16345cc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/cam_lrme_dev.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_subdev.h"
+#include "cam_node.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_lrme_hw_mgr_intf.h"
+
+#define CAM_LRME_DEV_NAME "cam-lrme"
+
+/**
+ * struct cam_lrme_dev
+ *
+ * @sd       : Subdev information
+ * @ctx      : List of base contexts
+ * @lrme_ctx : List of LRME contexts
+ * @lock     : Mutex for LRME subdev
+ * @open_cnt : Open count of LRME subdev
+ */
+struct cam_lrme_dev {
+	struct cam_subdev        sd;
+	struct cam_context       ctx[CAM_CTX_MAX];
+	struct cam_lrme_context  lrme_ctx[CAM_CTX_MAX];
+	struct mutex             lock;
+	uint32_t                 open_cnt;
+};
+
+static struct cam_lrme_dev *g_lrme_dev;
+
+static int cam_lrme_dev_buf_done_cb(void *ctxt_to_hw_map, uint32_t evt_id,
+	void *evt_data)
+{
+	uint64_t index;
+	struct cam_context *ctx;
+	int rc;
+
+	index = CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map);
+	CAM_DBG(CAM_LRME, "ctx index %llu, evt_id %u\n", index, evt_id);
+	ctx = &g_lrme_dev->ctx[index];
+	rc = ctx->irq_cb_intf(ctx, evt_id, evt_data);
+	if (rc)
+		CAM_ERR(CAM_LRME, "irq callback failed");
+
+	return rc;
+}
+
+static int cam_lrme_dev_open(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+
+	if (!lrme_dev) {
+		CAM_ERR(CAM_LRME,
+			"LRME Dev not initialized, dev=%pK", lrme_dev);
+		return -ENODEV;
+	}
+
+	mutex_lock(&lrme_dev->lock);
+	lrme_dev->open_cnt++;
+	mutex_unlock(&lrme_dev->lock);
+
+	return 0;
+}
+
+static int cam_lrme_dev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	int rc = 0;
+	struct cam_lrme_dev *lrme_dev = g_lrme_dev;
+	struct cam_node *node = v4l2_get_subdevdata(sd);
+
+	if (!lrme_dev) {
+		CAM_ERR(CAM_LRME, "Invalid args");
+		return -ENODEV;
+	}
+
+	mutex_lock(&lrme_dev->lock);
+	if (lrme_dev->open_cnt <= 0) {
+		CAM_DBG(CAM_LRME, "LRME subdev is already closed");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	lrme_dev->open_cnt--;
+	if (!node) {
+		CAM_ERR(CAM_LRME, "Node is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (lrme_dev->open_cnt == 0)
+		cam_node_shutdown(node);
+
+end:
+	mutex_unlock(&lrme_dev->lock);
+	return rc;
+}
+
+static const struct v4l2_subdev_internal_ops cam_lrme_subdev_internal_ops = {
+	.open = cam_lrme_dev_open,
+	.close = cam_lrme_dev_close,
+};
+
+static int cam_lrme_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	struct cam_hw_mgr_intf hw_mgr_intf;
+	struct cam_node *node;
+
+	g_lrme_dev = kzalloc(sizeof(struct cam_lrme_dev), GFP_KERNEL);
+	if (!g_lrme_dev) {
+		CAM_ERR(CAM_LRME, "No memory");
+		return -ENOMEM;
+	}
+	g_lrme_dev->sd.internal_ops = &cam_lrme_subdev_internal_ops;
+
+	mutex_init(&g_lrme_dev->lock);
+
+	rc = cam_subdev_probe(&g_lrme_dev->sd, pdev, CAM_LRME_DEV_NAME,
+		CAM_LRME_DEVICE_TYPE);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "LRME cam_subdev_probe failed");
+		goto free_mem;
+	}
+	node = (struct cam_node *)g_lrme_dev->sd.token;
+
+	rc = cam_lrme_hw_mgr_init(&hw_mgr_intf, cam_lrme_dev_buf_done_cb);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Can not initialized LRME HW manager");
+		goto unregister;
+	}
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_lrme_context_init(&g_lrme_dev->lrme_ctx[i],
+				&g_lrme_dev->ctx[i],
+				&node->hw_mgr_intf, i);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "LRME context init failed");
+			goto deinit_ctx;
+		}
+	}
+
+	rc = cam_node_init(node, &hw_mgr_intf, g_lrme_dev->ctx, CAM_CTX_MAX,
+		CAM_LRME_DEV_NAME);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "LRME node init failed");
+		goto deinit_ctx;
+	}
+
+	CAM_DBG(CAM_LRME, "%s probe complete", g_lrme_dev->sd.name);
+
+	return 0;
+
+deinit_ctx:
+	for (--i; i >= 0; i--) {
+		if (cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]))
+			CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+	}
+unregister:
+	if (cam_subdev_remove(&g_lrme_dev->sd))
+		CAM_ERR(CAM_LRME, "Failed in subdev remove");
+free_mem:
+	kfree(g_lrme_dev);
+
+	return rc;
+}
+
+static int cam_lrme_dev_remove(struct platform_device *pdev)
+{
+	int i;
+	int rc = 0;
+
+	for (i = 0; i < CAM_CTX_MAX; i++) {
+		rc = cam_lrme_context_deinit(&g_lrme_dev->lrme_ctx[i]);
+		if (rc)
+			CAM_ERR(CAM_LRME, "LRME context %d deinit failed", i);
+	}
+
+	rc = cam_lrme_hw_mgr_deinit();
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed in hw mgr deinit, rc=%d", rc);
+
+	rc = cam_subdev_remove(&g_lrme_dev->sd);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Unregister failed");
+
+	mutex_destroy(&g_lrme_dev->lock);
+	kfree(g_lrme_dev);
+	g_lrme_dev = NULL;
+
+	return rc;
+}
+
+static const struct of_device_id cam_lrme_dt_match[] = {
+	{
+		.compatible = "qcom,cam-lrme"
+	},
+	{}
+};
+
+static struct platform_driver cam_lrme_driver = {
+	.probe = cam_lrme_dev_probe,
+	.remove = cam_lrme_dev_remove,
+	.driver = {
+		.name = "cam_lrme",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_lrme_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_lrme_dev_init_module(void)
+{
+	return platform_driver_register(&cam_lrme_driver);
+}
+
+static void __exit cam_lrme_dev_exit_module(void)
+{
+	platform_driver_unregister(&cam_lrme_driver);
+}
+
+module_init(cam_lrme_dev_init_module);
+module_exit(cam_lrme_dev_exit_module);
+MODULE_DESCRIPTION("MSM LRME driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
new file mode 100644
index 0000000..d49d536e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += lrme_hw/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
new file mode 100644
index 0000000..98b090e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+#include "cam_packet_util.h"
+#include "cam_lrme_context.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_mgr_intf.h"
+#include "cam_lrme_hw_mgr.h"
+
+static struct cam_lrme_hw_mgr g_lrme_hw_mgr;
+
+static int cam_lrme_mgr_util_reserve_device(struct cam_lrme_hw_mgr *hw_mgr,
+	struct cam_lrme_acquire_args *lrme_acquire_args)
+{
+	int i, index = 0;
+	uint32_t min_ctx = UINT_MAX;
+	struct cam_lrme_device *hw_device = NULL;
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	if (!hw_mgr->device_count) {
+		mutex_unlock(&hw_mgr->hw_mgr_mutex);
+		CAM_ERR(CAM_LRME, "No device is registered");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < hw_mgr->device_count && i < CAM_LRME_HW_MAX; i++) {
+		hw_device = &hw_mgr->hw_device[i];
+		if (!hw_device->num_context) {
+			index = i;
+			break;
+		}
+		if (hw_device->num_context < min_ctx) {
+			min_ctx = hw_device->num_context;
+			index = i;
+		}
+	}
+
+	hw_device = &hw_mgr->hw_device[index];
+	hw_device->num_context++;
+
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	CAM_DBG(CAM_LRME, "reserve device index %d", index);
+
+	return index;
+}
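
The selection policy above: the first fully idle device wins immediately; otherwise the
device with the fewest attached contexts is chosen. A worked example with hypothetical
loads on a two-device build (CAM_LRME_HW_MAX >= 2):

	/* num_context = {3, 1}  ->  index 1 (fewest contexts)            */
	/* num_context = {3, 0}  ->  index 1 (idle device short-circuits) */
	/* num_context = {2, 2}  ->  index 0 (first minimum wins)         */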
+
+static int cam_lrme_mgr_util_get_device(struct cam_lrme_hw_mgr *hw_mgr,
+	uint32_t device_index, struct cam_lrme_device **hw_device)
+{
+	if (!hw_mgr) {
+		CAM_ERR(CAM_LRME, "invalid params hw_mgr %pK", hw_mgr);
+		return -EINVAL;
+	}
+
+	if (device_index >= CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Wrong device index %d", device_index);
+		return -EINVAL;
+	}
+
+	*hw_device = &hw_mgr->hw_device[device_index];
+
+	return 0;
+}
+
+static int cam_lrme_mgr_util_packet_validate(struct cam_packet *packet)
+{
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	int i, rc;
+
+	if (!packet) {
+		CAM_ERR(CAM_LRME, "Invalid args");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_LRME, "Packet request=%d, op_code=0x%x, size=%d, flags=%d",
+		packet->header.request_id, packet->header.op_code,
+		packet->header.size, packet->header.flags);
+	CAM_DBG(CAM_LRME,
+		"Packet cmdbuf(offset=%d, num=%d) io(offset=%d, num=%d)",
+		packet->cmd_buf_offset, packet->num_cmd_buf,
+		packet->io_configs_offset, packet->num_io_configs);
+	CAM_DBG(CAM_LRME,
+		"Packet Patch(offset=%d, num=%d) kmd(offset=%d, num=%d)",
+		packet->patch_offset, packet->num_patches,
+		packet->kmd_cmd_buf_offset, packet->kmd_cmd_buf_index);
+
+	if (cam_packet_util_validate_packet(packet)) {
+		CAM_ERR(CAM_LRME, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	if (!packet->num_io_configs) {
+		CAM_ERR(CAM_LRME, "no io configs");
+		return -EINVAL;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)&packet->payload +
+		packet->cmd_buf_offset);
+
+	for (i = 0; i < packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		CAM_DBG(CAM_LRME,
+			"CmdBuf[%d] hdl=%d, offset=%d, size=%d, len=%d, type=%d, meta_data=%d",
+			i,
+			cmd_desc[i].mem_handle, cmd_desc[i].offset,
+			cmd_desc[i].size, cmd_desc[i].length, cmd_desc[i].type,
+			cmd_desc[i].meta_data);
+
+		rc = cam_packet_util_validate_cmd_desc(&cmd_desc[i]);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Invalid cmd buffer %d", i);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int cam_lrme_mgr_util_prepare_io_buffer(int32_t iommu_hdl,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_lrme_hw_io_buffer *input_buf,
+	struct cam_lrme_hw_io_buffer *output_buf, uint32_t io_buf_size)
+{
+	int rc = -EINVAL;
+	uint32_t num_in_buf, num_out_buf, i, j, plane;
+	struct cam_buf_io_cfg *io_cfg;
+	uint64_t io_addr[CAM_PACKET_MAX_PLANES];
+	size_t size;
+
+	num_in_buf = 0;
+	num_out_buf = 0;
+	io_cfg = (struct cam_buf_io_cfg *)((uint8_t *)
+		 &prepare->packet->payload +
+		 prepare->packet->io_configs_offset);
+
+	for (i = 0; i < prepare->packet->num_io_configs; i++) {
+		CAM_DBG(CAM_LRME,
+			"IOConfig[%d] : handle[%d] Dir[%d] Res[%d] Fence[%d], Format[%d]",
+			i, io_cfg[i].mem_handle[0], io_cfg[i].direction,
+			io_cfg[i].resource_type,
+			io_cfg[i].fence, io_cfg[i].format);
+
+		if ((num_in_buf >= io_buf_size) ||
+			(num_out_buf >= io_buf_size)) {
+			CAM_ERR(CAM_LRME, "Invalid number of buffers %d %d %d",
+				num_in_buf, num_out_buf, io_buf_size);
+			return -EINVAL;
+		}
+
+		memset(io_addr, 0, sizeof(io_addr));
+		for (plane = 0; plane < CAM_PACKET_MAX_PLANES; plane++) {
+			if (!io_cfg[i].mem_handle[plane])
+				break;
+
+			rc = cam_mem_get_io_buf(io_cfg[i].mem_handle[plane],
+				iommu_hdl, &io_addr[plane], &size);
+			if (rc) {
+				CAM_ERR(CAM_LRME, "Cannot get io buf for %d %d",
+					plane, rc);
+				return -ENOMEM;
+			}
+
+			io_addr[plane] += io_cfg[i].offsets[plane];
+
+			CAM_DBG(CAM_LRME, "IO Address[%d][%d] : %llu",
+				io_cfg[i].direction, plane, io_addr[plane]);
+		}
+
+		switch (io_cfg[i].direction) {
+		case CAM_BUF_INPUT: {
+			prepare->in_map_entries[num_in_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->in_map_entries[num_in_buf].sync_id =
+				io_cfg[i].fence;
+
+			input_buf[num_in_buf].valid = true;
+			for (j = 0; j < plane; j++)
+				input_buf[num_in_buf].io_addr[j] = io_addr[j];
+			input_buf[num_in_buf].num_plane = plane;
+			input_buf[num_in_buf].io_cfg = &io_cfg[i];
+
+			num_in_buf++;
+			break;
+		}
+		case CAM_BUF_OUTPUT: {
+			prepare->out_map_entries[num_out_buf].resource_handle =
+				io_cfg[i].resource_type;
+			prepare->out_map_entries[num_out_buf].sync_id =
+				io_cfg[i].fence;
+
+			output_buf[num_out_buf].valid = true;
+			for (j = 0; j < plane; j++)
+				output_buf[num_out_buf].io_addr[j] = io_addr[j];
+			output_buf[num_out_buf].num_plane = plane;
+			output_buf[num_out_buf].io_cfg = &io_cfg[i];
+
+			num_out_buf++;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_LRME, "Unsupported io direction %d",
+				io_cfg[i].direction);
+			return -EINVAL;
+		}
+	}
+	prepare->num_in_map_entries = num_in_buf;
+	prepare->num_out_map_entries = num_out_buf;
+
+	return 0;
+}
+
+static int cam_lrme_mgr_util_prepare_hw_update_entries(
+	struct cam_lrme_hw_mgr *hw_mgr,
+	struct cam_hw_prepare_update_args *prepare,
+	struct cam_lrme_hw_cmd_config_args *config_args,
+	struct cam_kmd_buf_info *kmd_buf_info)
+{
+	int i, rc = 0;
+	struct cam_lrme_device *hw_device = NULL;
+	uint32_t *kmd_buf_addr;
+	uint32_t num_entry;
+	uint32_t kmd_buf_max_size;
+	uint32_t kmd_buf_used_bytes = 0;
+	struct cam_hw_update_entry *hw_entry;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+
+	hw_device = config_args->hw_device;
+	if (!hw_device) {
+		CAM_ERR(CAM_LRME, "Invalid hw_device");
+		return -EINVAL;
+	}
+
+	kmd_buf_addr = (uint32_t *)((uint8_t *)kmd_buf_info->cpu_addr +
+		kmd_buf_info->used_bytes);
+	kmd_buf_max_size = kmd_buf_info->size - kmd_buf_info->used_bytes;
+
+	config_args->cmd_buf_addr = kmd_buf_addr;
+	config_args->size = kmd_buf_max_size;
+	config_args->config_buf_size = 0;
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+			config_args,
+			sizeof(struct cam_lrme_hw_cmd_config_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME,
+				"Failed in CMD_PREPARE_HW_UPDATE %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "Can't find handle function");
+		return -EINVAL;
+	}
+
+	kmd_buf_used_bytes += config_args->config_buf_size;
+
+	if (!kmd_buf_used_bytes || (kmd_buf_used_bytes > kmd_buf_max_size)) {
+		CAM_ERR(CAM_LRME, "Invalid kmd used bytes %d (%d)",
+			kmd_buf_used_bytes, kmd_buf_max_size);
+		return -ENOMEM;
+	}
+
+	hw_entry = prepare->hw_update_entries;
+	num_entry = 0;
+
+	if (config_args->config_buf_size) {
+		if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_LRME, "Insufficient  HW entries :%d %d",
+				num_entry, prepare->max_hw_update_entries);
+			return -EINVAL;
+		}
+
+		hw_entry[num_entry].handle = kmd_buf_info->handle;
+		hw_entry[num_entry].len = config_args->config_buf_size;
+		hw_entry[num_entry].offset = kmd_buf_info->offset;
+
+		kmd_buf_info->used_bytes += config_args->config_buf_size;
+		kmd_buf_info->offset += config_args->config_buf_size;
+		num_entry++;
+	}
+
+	cmd_desc = (struct cam_cmd_buf_desc *)((uint8_t *)
+		&prepare->packet->payload + prepare->packet->cmd_buf_offset);
+
+	for (i = 0; i < prepare->packet->num_cmd_buf; i++) {
+		if (!cmd_desc[i].length)
+			continue;
+
+		if ((num_entry + 1) >= prepare->max_hw_update_entries) {
+			CAM_ERR(CAM_LRME, "Exceed max num of entry");
+			return -EINVAL;
+		}
+
+		hw_entry[num_entry].handle = cmd_desc[i].mem_handle;
+		hw_entry[num_entry].len = cmd_desc[i].length;
+		hw_entry[num_entry].offset = cmd_desc[i].offset;
+		num_entry++;
+	}
+	prepare->num_hw_update_entries = num_entry;
+
+	CAM_DBG(CAM_LRME, "FinalConfig : hw_entries=%d, Sync(in=%d, out=%d)",
+		prepare->num_hw_update_entries, prepare->num_in_map_entries,
+		prepare->num_out_map_entries);
+
+	return rc;
+}
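
The KMD-buffer accounting above is cumulative across requests: the hw layer writes its
config commands at cpu_addr + used_bytes, and on success used_bytes and offset both
advance by config_buf_size. A worked example with hypothetical numbers:

	/*
	 * 4096-byte KMD buffer, used_bytes = 256 on entry:
	 *   kmd_buf_addr     = cpu_addr + 256      (PREPARE_HW_UPDATE writes here)
	 *   kmd_buf_max_size = 4096 - 256 = 3840   (room offered to the hw layer)
	 * If the hw layer reports config_buf_size = 64, one hw_update_entry of
	 * len 64 is queued at the old offset, and the next request writes at
	 * cpu_addr + 320.
	 */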
+
+static void cam_lrme_mgr_util_put_frame_req(
+	struct list_head *src_list,
+	struct list_head *list,
+	spinlock_t *lock)
+{
+	spin_lock(lock);
+	list_add_tail(list, src_list);
+	spin_unlock(lock);
+}
+
+static int cam_lrme_mgr_util_get_frame_req(
+	struct list_head *src_list,
+	struct cam_lrme_frame_request **frame_req,
+	spinlock_t *lock)
+{
+	int rc = 0;
+	struct cam_lrme_frame_request *req_ptr = NULL;
+
+	spin_lock(lock);
+	if (!list_empty(src_list)) {
+		req_ptr = list_first_entry(src_list,
+			struct cam_lrme_frame_request, frame_list);
+		list_del_init(&req_ptr->frame_list);
+	} else {
+		rc = -ENOENT;
+	}
+	*frame_req = req_ptr;
+	spin_unlock(lock);
+
+	return rc;
+}
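
These two helpers are the only way frame requests move between the manager's free list
and a device's pending queues; each is O(1) under the matching spinlock. A short sketch
of the recycle path (assumes hw_mgr and hw_device in scope; the same pattern appears in
cam_lrme_mgr_hw_config below):

	struct cam_lrme_frame_request *req = NULL;

	if (!cam_lrme_mgr_util_get_frame_req(&hw_mgr->frame_free_list, &req,
			&hw_mgr->free_req_lock) && req)
		cam_lrme_mgr_util_put_frame_req(
			&hw_device->frame_pending_list_normal,
			&req->frame_list, &hw_device->normal_req_lock);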
+
+
+static int cam_lrme_mgr_util_submit_req(void *priv, void *data)
+{
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_hw_mgr *hw_mgr;
+	struct cam_lrme_frame_request *frame_req = NULL;
+	struct cam_lrme_hw_submit_args submit_args;
+	struct cam_lrme_mgr_work_data *work_data;
+	int rc;
+	int req_prio = 0;
+
+	if (!priv) {
+		CAM_ERR(CAM_LRME, "worker doesn't have private data");
+		return -EINVAL;
+	}
+
+	hw_mgr = (struct cam_lrme_hw_mgr *)priv;
+	work_data = (struct cam_lrme_mgr_work_data *)data;
+	hw_device = work_data->hw_device;
+
+	rc = cam_lrme_mgr_util_get_frame_req(
+		&hw_device->frame_pending_list_high, &frame_req,
+		&hw_device->high_req_lock);
+
+	if (!frame_req) {
+		rc = cam_lrme_mgr_util_get_frame_req(
+			&hw_device->frame_pending_list_normal, &frame_req,
+			&hw_device->normal_req_lock);
+		if (frame_req)
+			req_prio = 1;
+	}
+
+	if (!frame_req) {
+		CAM_DBG(CAM_LRME, "No pending request");
+		return 0;
+	}
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		submit_args.hw_update_entries = frame_req->hw_update_entries;
+		submit_args.num_hw_update_entries =
+			frame_req->num_hw_update_entries;
+		submit_args.frame_req = frame_req;
+
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_SUBMIT,
+			&submit_args, sizeof(struct cam_lrme_hw_submit_args));
+
+		if (rc == -EBUSY)
+			CAM_DBG(CAM_LRME, "device busy");
+		else if (rc)
+			CAM_ERR(CAM_LRME, "submit request failed rc %d", rc);
+		if (rc) {
+			spinlock_t *lock = (req_prio == 0) ?
+				&hw_device->high_req_lock :
+				&hw_device->normal_req_lock;
+
+			spin_lock(lock);
+			list_add(&frame_req->frame_list,
+				(req_prio == 0) ?
+				&hw_device->frame_pending_list_high :
+				&hw_device->frame_pending_list_normal);
+			spin_unlock(lock);
+		}
+		if (rc == -EBUSY)
+			rc = 0;
+	} else {
+		spinlock_t *lock = (req_prio == 0) ?
+			&hw_device->high_req_lock :
+			&hw_device->normal_req_lock;
+
+		spin_lock(lock);
+		list_add(&frame_req->frame_list,
+			(req_prio == 0) ?
+			&hw_device->frame_pending_list_high :
+			&hw_device->frame_pending_list_normal);
+		spin_unlock(lock);
+		rc = -EINVAL;
+	}
+
+	CAM_DBG(CAM_LRME, "End of submit, rc %d", rc);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_util_schedule_frame_req(
+	struct cam_lrme_hw_mgr *hw_mgr, struct cam_lrme_device *hw_device)
+{
+	int rc = 0;
+	struct crm_workq_task *task;
+	struct cam_lrme_mgr_work_data *work_data;
+
+	task = cam_req_mgr_workq_get_task(hw_device->work);
+	if (!task) {
+		CAM_ERR(CAM_LRME, "Can not get task for worker");
+		return -ENOMEM;
+	}
+
+	work_data = (struct cam_lrme_mgr_work_data *)task->payload;
+	work_data->hw_device = hw_device;
+
+	task->process_cb = cam_lrme_mgr_util_submit_req;
+	CAM_DBG(CAM_LRME, "enqueue submit task");
+	rc = cam_req_mgr_workq_enqueue_task(task, hw_mgr, CRM_TASK_PRIORITY_0);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_util_release(struct cam_lrme_hw_mgr *hw_mgr,
+	uint32_t device_index)
+{
+	int rc = 0;
+	struct cam_lrme_device *hw_device;
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		return rc;
+	}
+
+	mutex_lock(&hw_mgr->hw_mgr_mutex);
+	hw_device->num_context--;
+	mutex_unlock(&hw_mgr->hw_mgr_mutex);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_cb(void *data,
+	struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_hw_mgr *hw_mgr = &g_lrme_hw_mgr;
+	int rc = 0;
+	bool frame_abort = true;
+	struct cam_lrme_frame_request *frame_req;
+	struct cam_lrme_device *hw_device;
+
+	if (!data || !cb_args) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	hw_device = (struct cam_lrme_device *)data;
+	frame_req = cb_args->frame_req;
+
+	if (cb_args->cb_type & CAM_LRME_CB_PUT_FRAME) {
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->frame_list);
+		cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+		cb_args->cb_type &= ~CAM_LRME_CB_PUT_FRAME;
+		frame_req = NULL;
+	}
+
+	if (cb_args->cb_type & CAM_LRME_CB_COMP_REG_UPDATE) {
+		cb_args->cb_type &= ~CAM_LRME_CB_COMP_REG_UPDATE;
+		CAM_DBG(CAM_LRME, "Reg update");
+	}
+
+	if (!frame_req)
+		return rc;
+
+	if (cb_args->cb_type & CAM_LRME_CB_BUF_DONE) {
+		cb_args->cb_type &= ~CAM_LRME_CB_BUF_DONE;
+		frame_abort = false;
+	} else if (cb_args->cb_type & CAM_LRME_CB_ERROR) {
+		cb_args->cb_type &= ~CAM_LRME_CB_ERROR;
+		frame_abort = true;
+	} else {
+		CAM_ERR(CAM_LRME, "Wrong cb type %d, req %lld",
+			cb_args->cb_type, frame_req->req_id);
+		return -EINVAL;
+	}
+
+	if (hw_mgr->event_cb) {
+		struct cam_hw_done_event_data buf_data;
+
+		buf_data.request_id = frame_req->req_id;
+		CAM_DBG(CAM_LRME, "frame req %llu, frame_abort %d",
+			frame_req->req_id, frame_abort);
+		rc = hw_mgr->event_cb(frame_req->ctxt_to_hw_map,
+			frame_abort, &buf_data);
+	} else {
+		CAM_ERR(CAM_LRME, "No cb function");
+	}
+	memset(frame_req, 0x0, sizeof(*frame_req));
+	INIT_LIST_HEAD(&frame_req->frame_list);
+	cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+
+	rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_get_caps(void *hw_mgr_priv, void *hw_get_caps_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_query_cap_cmd *args = hw_get_caps_args;
+
+	if (sizeof(struct cam_lrme_query_cap_cmd) != args->size) {
+		CAM_ERR(CAM_LRME,
+			"sizeof(struct cam_lrme_query_cap_cmd) = %zu, args->size = %d",
+			sizeof(struct cam_lrme_query_cap_cmd), args->size);
+		return -EFAULT;
+	}
+
+	if (copy_to_user(u64_to_user_ptr(args->caps_handle),
+		&(hw_mgr->lrme_caps),
+		sizeof(struct cam_lrme_query_cap_cmd))) {
+		CAM_ERR(CAM_LRME, "copy to user failed");
+		return -EFAULT;
+	}
+
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_acquire(void *hw_mgr_priv, void *hw_acquire_args)
+{
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_acquire_args *args =
+		(struct cam_hw_acquire_args *)hw_acquire_args;
+	struct cam_lrme_acquire_args lrme_acquire_args;
+	uintptr_t device_index;
+
+	if (!hw_mgr_priv || !args) {
+		CAM_ERR(CAM_LRME,
+		"Invalid input params hw_mgr_priv %pK, acquire_args %pK",
+		hw_mgr_priv, args);
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&lrme_acquire_args,
+		(void __user *)args->acquire_info,
+		sizeof(struct cam_lrme_acquire_args))) {
+		CAM_ERR(CAM_LRME, "Failed to copy acquire args from user");
+		return -EFAULT;
+	}
+
+	device_index = cam_lrme_mgr_util_reserve_device(hw_mgr,
+		&lrme_acquire_args);
+	CAM_DBG(CAM_LRME, "Get device id %llu", device_index);
+
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Get wrong device id %lu", device_index);
+		return -EINVAL;
+	}
+
+	/* device_index occupies the lower 4 bits of ctxt_to_hw_map */
+	args->ctxt_to_hw_map = (void *)device_index;
+
+	return 0;
+}
+
+static int cam_lrme_mgr_hw_release(void *hw_mgr_priv, void *hw_release_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_release_args *args =
+		(struct cam_hw_release_args *)hw_release_args;
+	uint64_t device_index;
+
+	if (!hw_mgr_priv || !hw_release_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments %pK, %pK",
+			hw_mgr_priv, hw_release_args);
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %llu", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_release(hw_mgr, device_index);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed in release device, rc=%d", rc);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_flush(void *hw_mgr_priv, void *hw_flush_args)
+{
+	int rc = 0, i;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_flush_args *args;
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_frame_request *frame_req = NULL, *req_to_flush = NULL;
+	struct cam_lrme_frame_request **req_list = NULL;
+	uint32_t device_index;
+	struct cam_lrme_hw_flush_args lrme_flush_args;
+	uint32_t priority;
+
+	if (!hw_mgr_priv || !hw_flush_args) {
+		CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_flush_args);
+		return -EINVAL;
+	}
+
+	args = (struct cam_hw_flush_args *)hw_flush_args;
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		goto end;
+	}
+
+	req_list = (struct cam_lrme_frame_request **)args->flush_req_pending;
+	for (i = 0; i < args->num_req_pending; i++) {
+		frame_req = req_list[i];
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		cam_lrme_mgr_util_put_frame_req(&hw_mgr->frame_free_list,
+			&frame_req->frame_list, &hw_mgr->free_req_lock);
+	}
+
+	req_list = (struct cam_lrme_frame_request **)args->flush_req_active;
+	for (i = 0; i < args->num_req_active; i++) {
+		frame_req = req_list[i];
+		priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+		spin_lock((priority == CAM_LRME_PRIORITY_HIGH) ?
+			&hw_device->high_req_lock :
+			&hw_device->normal_req_lock);
+		if (!list_empty(&frame_req->frame_list)) {
+			list_del_init(&frame_req->frame_list);
+			cam_lrme_mgr_util_put_frame_req(
+				&hw_mgr->frame_free_list,
+				&frame_req->frame_list,
+				&hw_mgr->free_req_lock);
+		} else {
+			req_to_flush = frame_req;
+		}
+		spin_unlock((priority == CAM_LRME_PRIORITY_HIGH) ?
+			&hw_device->high_req_lock :
+			&hw_device->normal_req_lock);
+	}
+	if (!req_to_flush)
+		goto end;
+	if (hw_device->hw_intf.hw_ops.flush) {
+		lrme_flush_args.ctxt_to_hw_map = req_to_flush->ctxt_to_hw_map;
+		lrme_flush_args.flush_type = args->flush_type;
+		lrme_flush_args.req_to_flush = req_to_flush;
+		rc = hw_device->hw_intf.hw_ops.flush(hw_device->hw_intf.hw_priv,
+			&lrme_flush_args,
+			sizeof(lrme_flush_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW Stop %d", rc);
+			goto end;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "No stop ops");
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+
+static int cam_lrme_mgr_hw_start(void *hw_mgr_priv, void *hw_start_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_start_args *args =
+		(struct cam_hw_start_args *)hw_start_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr || !args) {
+		CAM_ERR(CAM_LRME, "Invalid input params");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Start device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.start) {
+		rc = hw_device->hw_intf.hw_ops.start(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW start %d", rc);
+			return rc;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "Invalid start function");
+		return -EINVAL;
+	}
+
+	rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_DUMP_REGISTER,
+			&g_lrme_hw_mgr.debugfs_entry.dump_register,
+			sizeof(bool));
+
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_stop(void *hw_mgr_priv, void *stop_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_stop_args *args =
+		(struct cam_hw_stop_args *)stop_args;
+	struct cam_lrme_device *hw_device;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !stop_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	CAM_DBG(CAM_LRME, "Stop device index %d", device_index);
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw device");
+		return rc;
+	}
+
+	if (hw_device->hw_intf.hw_ops.stop) {
+		rc = hw_device->hw_intf.hw_ops.stop(
+			hw_device->hw_intf.hw_priv, NULL, 0);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed in HW stop %d", rc);
+			goto end;
+		}
+	}
+
+end:
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_prepare_update(void *hw_mgr_priv,
+	void *hw_prepare_update_args)
+{
+	int rc = 0, i;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_prepare_update_args *args =
+		(struct cam_hw_prepare_update_args *)hw_prepare_update_args;
+	struct cam_lrme_device *hw_device;
+	struct cam_kmd_buf_info kmd_buf;
+	struct cam_lrme_hw_cmd_config_args config_args;
+	struct cam_lrme_frame_request *frame_req = NULL;
+	uint32_t device_index;
+
+	if (!hw_mgr_priv || !hw_prepare_update_args) {
+		CAM_ERR(CAM_LRME, "Invalid args %pK %pK",
+			hw_mgr_priv, hw_prepare_update_args);
+		return -EINVAL;
+	}
+
+	device_index = CAM_LRME_DECODE_DEVICE_INDEX(args->ctxt_to_hw_map);
+	if (device_index >= hw_mgr->device_count) {
+		CAM_ERR(CAM_LRME, "Invalid device index %d", device_index);
+		return -EPERM;
+	}
+
+	rc = cam_lrme_mgr_util_get_device(hw_mgr, device_index, &hw_device);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in getting device %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_packet_validate(args->packet);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in packet validation %d", rc);
+		goto error;
+	}
+
+	rc = cam_packet_util_get_kmd_buffer(args->packet, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in get kmd buf buffer %d", rc);
+		goto error;
+	}
+
+	CAM_DBG(CAM_LRME,
+		"KMD Buf : hdl=%d, cpu_addr=%pK, offset=%d, size=%d, used=%d",
+		kmd_buf.handle, kmd_buf.cpu_addr, kmd_buf.offset,
+		kmd_buf.size, kmd_buf.used_bytes);
+
+	rc = cam_packet_util_process_patches(args->packet,
+		hw_mgr->device_iommu.non_secure, hw_mgr->device_iommu.secure);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Patch packet failed, rc=%d", rc);
+		return rc;
+	}
+
+	memset(&config_args, 0, sizeof(config_args));
+	config_args.hw_device = hw_device;
+
+	rc = cam_lrme_mgr_util_prepare_io_buffer(
+		hw_mgr->device_iommu.non_secure, args,
+		config_args.input_buf, config_args.output_buf,
+		CAM_LRME_MAX_IO_BUFFER);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in prepare IO Buf %d", rc);
+		goto error;
+	}
+	/* Check port number */
+	if (args->num_in_map_entries == 0 || args->num_out_map_entries == 0) {
+		CAM_ERR(CAM_LRME, "Error in port number in %d, out %d",
+			args->num_in_map_entries, args->num_out_map_entries);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_prepare_hw_update_entries(hw_mgr, args,
+		&config_args, &kmd_buf);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Error in hw update entries %d", rc);
+		goto error;
+	}
+
+	rc = cam_lrme_mgr_util_get_frame_req(&hw_mgr->frame_free_list,
+		&frame_req, &hw_mgr->free_req_lock);
+	if (rc || !frame_req) {
+		CAM_ERR(CAM_LRME, "Can not get free frame request");
+		goto error;
+	}
+
+	frame_req->ctxt_to_hw_map = args->ctxt_to_hw_map;
+	frame_req->req_id = args->packet->header.request_id;
+	frame_req->hw_device = hw_device;
+	frame_req->num_hw_update_entries = args->num_hw_update_entries;
+	for (i = 0; i < args->num_hw_update_entries; i++)
+		frame_req->hw_update_entries[i] = args->hw_update_entries[i];
+
+	args->priv = frame_req;
+
+	CAM_DBG(CAM_LRME, "FramePrepare : Frame[%lld]", frame_req->req_id);
+
+	return 0;
+
+error:
+	return rc;
+}
+
+static int cam_lrme_mgr_hw_config(void *hw_mgr_priv,
+	void *hw_config_args)
+{
+	int rc = 0;
+	struct cam_lrme_hw_mgr *hw_mgr = hw_mgr_priv;
+	struct cam_hw_config_args *args =
+		(struct cam_hw_config_args *)hw_config_args;
+	struct cam_lrme_frame_request *frame_req;
+	struct cam_lrme_device *hw_device = NULL;
+	enum cam_lrme_hw_mgr_ctx_priority priority;
+
+	if (!hw_mgr_priv || !hw_config_args) {
+		CAM_ERR(CAM_LRME, "Invalid arguments, hw_mgr %pK, config %pK",
+			hw_mgr_priv, hw_config_args);
+		return -EINVAL;
+	}
+
+	if (!args->num_hw_update_entries) {
+		CAM_ERR(CAM_LRME, "No hw update entries");
+		return -EINVAL;
+	}
+
+	frame_req = (struct cam_lrme_frame_request *)args->priv;
+	if (!frame_req) {
+		CAM_ERR(CAM_LRME, "No frame request");
+		return -EINVAL;
+	}
+
+	hw_device = frame_req->hw_device;
+	if (!hw_device)
+		return -EINVAL;
+
+	priority = CAM_LRME_DECODE_PRIORITY(args->ctxt_to_hw_map);
+	if (priority == CAM_LRME_PRIORITY_HIGH) {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_high,
+			&frame_req->frame_list, &hw_device->high_req_lock);
+	} else {
+		cam_lrme_mgr_util_put_frame_req(
+			&hw_device->frame_pending_list_normal,
+			&frame_req->frame_list, &hw_device->normal_req_lock);
+	}
+
+	CAM_DBG(CAM_LRME, "schedule req %llu", frame_req->req_id);
+	rc = cam_lrme_mgr_util_schedule_frame_req(hw_mgr, hw_device);
+
+	return rc;
+}
+
+static int cam_lrme_mgr_create_debugfs_entry(void)
+{
+	int rc = 0;
+
+	g_lrme_hw_mgr.debugfs_entry.dentry =
+		debugfs_create_dir("camera_lrme", NULL);
+	if (!g_lrme_hw_mgr.debugfs_entry.dentry) {
+		CAM_ERR(CAM_LRME, "failed to create dentry");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_bool("dump_register",
+		0644,
+		g_lrme_hw_mgr.debugfs_entry.dentry,
+		&g_lrme_hw_mgr.debugfs_entry.dump_register)) {
+		CAM_ERR(CAM_LRME, "failed to create dump register entry");
+		rc = -ENOMEM;
+		goto err;
+	}
+
+	return rc;
+
+err:
+	debugfs_remove_recursive(g_lrme_hw_mgr.debugfs_entry.dentry);
+	g_lrme_hw_mgr.debugfs_entry.dentry = NULL;
+	return rc;
+}
+
+
+int cam_lrme_mgr_register_device(
+	struct cam_hw_intf *lrme_hw_intf,
+	struct cam_iommu_handle *device_iommu,
+	struct cam_iommu_handle *cdm_iommu)
+{
+	struct cam_lrme_device *hw_device;
+	char buf[128];
+	int i, rc;
+
+	hw_device = &g_lrme_hw_mgr.hw_device[lrme_hw_intf->hw_idx];
+
+	g_lrme_hw_mgr.device_iommu = *device_iommu;
+	g_lrme_hw_mgr.cdm_iommu = *cdm_iommu;
+
+	memcpy(&hw_device->hw_intf, lrme_hw_intf, sizeof(struct cam_hw_intf));
+
+	spin_lock_init(&hw_device->high_req_lock);
+	spin_lock_init(&hw_device->normal_req_lock);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_high);
+	INIT_LIST_HEAD(&hw_device->frame_pending_list_normal);
+
+	rc = snprintf(buf, sizeof(buf), "cam_lrme_device_submit_worker%d",
+		lrme_hw_intf->hw_idx);
+	CAM_DBG(CAM_LRME, "Create submit workq for %s", buf);
+	rc = cam_req_mgr_workq_create(buf,
+		CAM_LRME_WORKQ_NUM_TASK,
+		&hw_device->work, CRM_WORKQ_USAGE_NON_IRQ,
+		0);
+	if (rc) {
+		CAM_ERR(CAM_LRME,
+			"Unable to create a worker, rc=%d", rc);
+		return rc;
+	}
+
+	for (i = 0; i < CAM_LRME_WORKQ_NUM_TASK; i++)
+		hw_device->work->task.pool[i].payload =
+			&hw_device->work_data[i];
+
+	if (hw_device->hw_intf.hw_ops.process_cmd) {
+		struct cam_lrme_hw_cmd_set_cb cb_args;
+
+		cb_args.cam_lrme_hw_mgr_cb = cam_lrme_mgr_cb;
+		cb_args.data = hw_device;
+
+		rc = hw_device->hw_intf.hw_ops.process_cmd(
+			hw_device->hw_intf.hw_priv,
+			CAM_LRME_HW_CMD_REGISTER_CB,
+			&cb_args, sizeof(cb_args));
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Register cb failed");
+			goto destroy_workqueue;
+		}
+		CAM_DBG(CAM_LRME, "cb registered");
+	}
+
+	if (hw_device->hw_intf.hw_ops.get_hw_caps) {
+		rc = hw_device->hw_intf.hw_ops.get_hw_caps(
+			hw_device->hw_intf.hw_priv, &hw_device->hw_caps,
+			sizeof(hw_device->hw_caps));
+		if (rc)
+			CAM_ERR(CAM_LRME, "Get caps failed");
+	} else {
+		CAM_ERR(CAM_LRME, "No get_hw_caps function");
+		rc = -EINVAL;
+		goto destroy_workqueue;
+	}
+	g_lrme_hw_mgr.lrme_caps.dev_caps[lrme_hw_intf->hw_idx] =
+		hw_device->hw_caps;
+	g_lrme_hw_mgr.device_count++;
+	g_lrme_hw_mgr.lrme_caps.device_iommu = g_lrme_hw_mgr.device_iommu;
+	g_lrme_hw_mgr.lrme_caps.cdm_iommu = g_lrme_hw_mgr.cdm_iommu;
+	g_lrme_hw_mgr.lrme_caps.num_devices = g_lrme_hw_mgr.device_count;
+
+	hw_device->valid = true;
+
+	CAM_DBG(CAM_LRME, "device registration done");
+	return 0;
+
+destroy_workqueue:
+	cam_req_mgr_workq_destroy(&hw_device->work);
+
+	return rc;
+}
+
+int cam_lrme_mgr_deregister_device(int device_index)
+{
+	struct cam_lrme_device *hw_device;
+
+	hw_device = &g_lrme_hw_mgr.hw_device[device_index];
+	cam_req_mgr_workq_destroy(&hw_device->work);
+	memset(hw_device, 0x0, sizeof(struct cam_lrme_device));
+	g_lrme_hw_mgr.device_count--;
+
+	return 0;
+}
+
+int cam_lrme_hw_mgr_deinit(void)
+{
+	mutex_destroy(&g_lrme_hw_mgr.hw_mgr_mutex);
+	memset(&g_lrme_hw_mgr, 0x0, sizeof(g_lrme_hw_mgr));
+
+	return 0;
+}
+
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+	cam_hw_event_cb_func cam_lrme_dev_buf_done_cb)
+{
+	int i, rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+	if (!hw_mgr_intf)
+		return -EINVAL;
+
+	CAM_DBG(CAM_LRME, "device count %d", g_lrme_hw_mgr.device_count);
+	if (g_lrme_hw_mgr.device_count > CAM_LRME_HW_MAX) {
+		CAM_ERR(CAM_LRME, "Invalid count of devices");
+		return -EINVAL;
+	}
+
+	memset(hw_mgr_intf, 0, sizeof(*hw_mgr_intf));
+
+	mutex_init(&g_lrme_hw_mgr.hw_mgr_mutex);
+	spin_lock_init(&g_lrme_hw_mgr.free_req_lock);
+	INIT_LIST_HEAD(&g_lrme_hw_mgr.frame_free_list);
+
+	/* Init hw mgr frame requests and add to free list */
+	for (i = 0; i < CAM_CTX_REQ_MAX * CAM_CTX_MAX; i++) {
+		frame_req = &g_lrme_hw_mgr.frame_req[i];
+
+		memset(frame_req, 0x0, sizeof(*frame_req));
+		INIT_LIST_HEAD(&frame_req->frame_list);
+
+		list_add_tail(&frame_req->frame_list,
+			&g_lrme_hw_mgr.frame_free_list);
+	}
+
+	hw_mgr_intf->hw_mgr_priv = &g_lrme_hw_mgr;
+	hw_mgr_intf->hw_get_caps = cam_lrme_mgr_get_caps;
+	hw_mgr_intf->hw_acquire = cam_lrme_mgr_hw_acquire;
+	hw_mgr_intf->hw_release = cam_lrme_mgr_hw_release;
+	hw_mgr_intf->hw_start = cam_lrme_mgr_hw_start;
+	hw_mgr_intf->hw_stop = cam_lrme_mgr_hw_stop;
+	hw_mgr_intf->hw_prepare_update = cam_lrme_mgr_hw_prepare_update;
+	hw_mgr_intf->hw_config = cam_lrme_mgr_hw_config;
+	hw_mgr_intf->hw_read = NULL;
+	hw_mgr_intf->hw_write = NULL;
+	hw_mgr_intf->hw_close = NULL;
+	hw_mgr_intf->hw_flush = cam_lrme_mgr_hw_flush;
+
+	g_lrme_hw_mgr.event_cb = cam_lrme_dev_buf_done_cb;
+
+	cam_lrme_mgr_create_debugfs_entry();
+
+	CAM_DBG(CAM_LRME, "Hw mgr init done");
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
new file mode 100644
index 0000000..e315d3f8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_H_
+#define _CAM_LRME_HW_MGR_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include <media/cam_lrme.h>
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_cpas_api.h"
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_context.h"
+
+#define CAM_LRME_HW_MAX 1
+#define CAM_LRME_WORKQ_NUM_TASK 10
+
+#define CAM_LRME_DECODE_DEVICE_INDEX(ctxt_to_hw_map) \
+	((uintptr_t)ctxt_to_hw_map & 0xF)
+
+#define CAM_LRME_DECODE_PRIORITY(ctxt_to_hw_map) \
+	(((uintptr_t)ctxt_to_hw_map & 0xF0) >> 4)
+
+#define CAM_LRME_DECODE_CTX_INDEX(ctxt_to_hw_map) \
+	((uint64_t)(uintptr_t)ctxt_to_hw_map >> CAM_LRME_CTX_INDEX_SHIFT)
+
+/**
+ * enum cam_lrme_hw_mgr_ctx_priority
+ *
+ * CAM_LRME_PRIORITY_HIGH   : High priority client
+ * CAM_LRME_PRIORITY_NORMAL : Normal priority client
+ */
+enum cam_lrme_hw_mgr_ctx_priority {
+	CAM_LRME_PRIORITY_HIGH,
+	CAM_LRME_PRIORITY_NORMAL,
+};
+
+/**
+ * struct cam_lrme_mgr_work_data : HW Mgr work data
+ *
+ * @hw_device                    : Pointer to the hw device
+ */
+struct cam_lrme_mgr_work_data {
+	struct cam_lrme_device *hw_device;
+};
+
+/**
+ * struct cam_lrme_debugfs_entry : debugfs entry struct
+ *
+ * @dentry                       : entry of debugfs
+ * @dump_register                : flag to dump registers
+ */
+struct cam_lrme_debugfs_entry {
+	struct dentry   *dentry;
+	bool             dump_register;
+};
+
+/**
+ * struct cam_lrme_device     : LRME HW device
+ *
+ * @hw_caps                   : HW device's capabilities
+ * @hw_intf                   : HW device's interface information
+ * @num_context               : Number of contexts using this device
+ * @valid                     : Whether this device is valid
+ * @work                      : HW device's work queue
+ * @work_data                 : HW device's work data
+ * @frame_pending_list_high   : High priority request queue
+ * @frame_pending_list_normal : Normal priority request queue
+ * @high_req_lock             : Spinlock of high priority queue
+ * @normal_req_lock           : Spinlock of normal priority queue
+ */
+struct cam_lrme_device {
+	struct cam_lrme_dev_cap        hw_caps;
+	struct cam_hw_intf             hw_intf;
+	uint32_t                       num_context;
+	bool                           valid;
+	struct cam_req_mgr_core_workq *work;
+	struct cam_lrme_mgr_work_data  work_data[CAM_LRME_WORKQ_NUM_TASK];
+	struct list_head               frame_pending_list_high;
+	struct list_head               frame_pending_list_normal;
+	spinlock_t                     high_req_lock;
+	spinlock_t                     normal_req_lock;
+};
+
+/**
+ * struct cam_lrme_hw_mgr : LRME HW manager
+ *
+ * @device_count    : Number of HW devices
+ * @frame_free_list : List of free frame requests
+ * @hw_mgr_mutex    : Mutex to protect HW manager data
+ * @free_req_lock   : Spinlock to protect frame_free_list
+ * @hw_device       : List of HW devices
+ * @device_iommu    : Device IOMMU handle
+ * @cdm_iommu       : CDM IOMMU handle
+ * @frame_req       : Pool of frame requests to use
+ * @lrme_caps       : LRME capabilities
+ * @event_cb        : IRQ callback function
+ * @debugfs_entry   : debugfs entry to set debug properties
+ */
+struct cam_lrme_hw_mgr {
+	uint32_t                      device_count;
+	struct list_head              frame_free_list;
+	struct mutex                  hw_mgr_mutex;
+	spinlock_t                    free_req_lock;
+	struct cam_lrme_device        hw_device[CAM_LRME_HW_MAX];
+	struct cam_iommu_handle       device_iommu;
+	struct cam_iommu_handle       cdm_iommu;
+	struct cam_lrme_frame_request frame_req[CAM_CTX_REQ_MAX * CAM_CTX_MAX];
+	struct cam_lrme_query_cap_cmd lrme_caps;
+	cam_hw_event_cb_func          event_cb;
+	struct cam_lrme_debugfs_entry debugfs_entry;
+};
+
+int cam_lrme_mgr_register_device(struct cam_hw_intf *lrme_hw_intf,
+	struct cam_iommu_handle *device_iommu,
+	struct cam_iommu_handle *cdm_iommu);
+int cam_lrme_mgr_deregister_device(int device_index);
+
+#endif /* _CAM_LRME_HW_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
new file mode 100644
index 0000000..decf2c9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/cam_lrme_hw_mgr_intf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_LRME_HW_MGR_INTF_H_
+#define _CAM_LRME_HW_MGR_INTF_H_
+
+#include <linux/of.h>
+
+#include "cam_debug_util.h"
+#include "cam_hw_mgr_intf.h"
+
+int cam_lrme_hw_mgr_init(struct cam_hw_mgr_intf *hw_mgr_intf,
+	cam_hw_event_cb_func cam_lrme_dev_buf_done_cb);
+int cam_lrme_hw_mgr_deinit(void);
+
+#endif /* _CAM_LRME_HW_MGR_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
new file mode 100644
index 0000000..7a14297
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sync
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cdm
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw
+ccflags-y += -Idrivers/media/platform/msm/camera
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_lrme_hw_dev.o cam_lrme_hw_core.o cam_lrme_hw_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
new file mode 100644
index 0000000..cc86621
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.c
@@ -0,0 +1,1266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_smmu_api.h"
+
+static void cam_lrme_dump_registers(void __iomem *base)
+{
+	/* dump the clc registers */
+	cam_io_dump(base, 0x60, (0xc0 - 0x60) / 0x4);
+	/* dump the fe and we registers */
+	cam_io_dump(base, 0x200, (0x29c - 0x200) / 0x4);
+	cam_io_dump(base, 0x2f0, (0x330 - 0x2f0) / 0x4);
+	cam_io_dump(base, 0x500, (0x5b4 - 0x500) / 0x4);
+	cam_io_dump(base, 0x700, (0x778 - 0x700) / 0x4);
+	cam_io_dump(base, 0x800, (0x878 - 0x800) / 0x4);
+	/* dump lrme sw registers, interrupts */
+	cam_io_dump(base, 0x900, (0x928 - 0x900) / 0x4);
+}
+
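+/*
+ * CDM command payloads are flat <reg_offset, value> pairs. The helper
+ * below appends one pair and advances the index; the accumulated pairs
+ * are later handed to cdm_write_regrandom() in units of pairs
+ * (num_cmd / 2).
+ */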
+static void cam_lrme_cdm_write_reg_val_pair(uint32_t *buffer,
+	uint32_t *index, uint32_t reg_offset, uint32_t reg_value)
+{
+	buffer[(*index)++] = reg_offset;
+	buffer[(*index)++] = reg_value;
+}
+
+static void cam_lrme_hw_util_fill_fe_reg(struct cam_lrme_hw_io_buffer *io_buf,
+	uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+	struct cam_lrme_hw_info *hw_info)
+{
+	uint32_t reg_val;
+
+	/* 1. config buffer size */
+	reg_val = io_buf->io_cfg->planes[0].width;
+	reg_val |= (io_buf->io_cfg->planes[0].height << 16);
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].rd_buffer_size,
+		reg_val);
+
+	CAM_DBG(CAM_LRME,
+		"width %d", io_buf->io_cfg->planes[0].width);
+	CAM_DBG(CAM_LRME,
+		"height %d", io_buf->io_cfg->planes[0].height);
+
+	/* 2. config image address */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].addr_image,
+		io_buf->io_addr[0]);
+
+	CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+	/* 3. config stride */
+	reg_val = io_buf->io_cfg->planes[0].plane_stride;
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].rd_stride,
+		reg_val);
+
+	CAM_DBG(CAM_LRME, "plane_stride %d",
+		io_buf->io_cfg->planes[0].plane_stride);
+
+	/* 4. enable client */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_rd_reg.bus_client_reg[index].core_cfg, 0x1);
+
+	/* 5. unpack_cfg */
+	if (io_buf->io_cfg->format == CAM_FORMAT_PD10)
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+			0x0);
+	else if (io_buf->io_cfg->format == CAM_FORMAT_Y_ONLY ||
+			io_buf->io_cfg->format == CAM_FORMAT_PLAIN8 ||
+			io_buf->io_cfg->format == CAM_FORMAT_NV12 ||
+			io_buf->io_cfg->format == CAM_FORMAT_NV21)
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+			0x1);
+	else if (io_buf->io_cfg->format == CAM_FORMAT_PLAIN16_10)
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+			hw_info->bus_rd_reg.bus_client_reg[index].unpack_cfg_0,
+			0x22);
+	else
+		CAM_ERR(CAM_LRME, "Unsupported format %d",
+			io_buf->io_cfg->format);
+}
+
+static void cam_lrme_hw_util_fill_we_reg(struct cam_lrme_hw_io_buffer *io_buf,
+	uint32_t index, uint32_t *reg_val_pair, uint32_t *num_cmd,
+	struct cam_lrme_hw_info *hw_info)
+{
+	/* config client mode */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].cfg,
+		0x1);
+
+	/* image address */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].addr_image,
+		io_buf->io_addr[0]);
+	CAM_DBG(CAM_LRME, "io addr %llu", io_buf->io_addr[0]);
+
+	/* buffer width and height */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].buffer_width_cfg,
+		io_buf->io_cfg->planes[0].width);
+	CAM_DBG(CAM_LRME, "width %d", io_buf->io_cfg->planes[0].width);
+
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].buffer_height_cfg,
+		io_buf->io_cfg->planes[0].height);
+	CAM_DBG(CAM_LRME, "height %d", io_buf->io_cfg->planes[0].height);
+
+	/* packer cfg */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].packer_cfg,
+		(index == 0) ? 0x1 : 0x5);
+
+	/* client stride */
+	cam_lrme_cdm_write_reg_val_pair(reg_val_pair, num_cmd,
+		hw_info->bus_wr_reg.bus_client_reg[index].wr_stride,
+		io_buf->io_cfg->planes[0].plane_stride);
+	CAM_DBG(CAM_LRME, "plane_stride %d",
+		io_buf->io_cfg->planes[0].plane_stride);
+}
+
+static int cam_lrme_hw_util_process_config_hw(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_hw_cmd_config_args *config_args)
+{
+	int i;
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_cdm_info *hw_cdm_info;
+	uint32_t *cmd_buf_addr = config_args->cmd_buf_addr;
+	uint32_t reg_val_pair[CAM_LRME_MAX_REG_PAIR_NUM];
+	struct cam_lrme_hw_io_buffer *io_buf;
+	struct cam_lrme_hw_info *hw_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+	uint32_t num_cmd = 0;
+	uint32_t size;
+	uint32_t mem_base, available_size = config_args->size;
+	uint32_t output_res_mask = 0, input_res_mask = 0;
+
+	if (!cmd_buf_addr) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	hw_cdm_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_cdm_info;
+
+	for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+		io_buf = &config_args->input_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_INPUT) {
+			CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_INPUT);
+			return -EINVAL;
+		}
+		CAM_DBG(CAM_LRME,
+			"resource_type %d", io_buf->io_cfg->resource_type);
+
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_LRME_IO_TYPE_TAR:
+			cam_lrme_hw_util_fill_fe_reg(io_buf, 0, reg_val_pair,
+				&num_cmd, hw_info);
+
+			input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_TAR;
+			break;
+		case CAM_LRME_IO_TYPE_REF:
+			cam_lrme_hw_util_fill_fe_reg(io_buf, 1, reg_val_pair,
+				&num_cmd, hw_info);
+
+			input_res_mask |= CAM_LRME_INPUT_PORT_TYPE_REF;
+			break;
+		default:
+			CAM_ERR(CAM_LRME, "wrong resource_type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < CAM_LRME_BUS_RD_MAX_CLIENTS; i++)
+		if (!((input_res_mask >> i) & 0x1))
+			cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+				hw_info->bus_rd_reg.bus_client_reg[i].core_cfg,
+				0x0);
+
+	for (i = 0; i < CAM_LRME_MAX_IO_BUFFER; i++) {
+		io_buf = &config_args->output_buf[i];
+
+		if (io_buf->valid == false)
+			break;
+
+		if (io_buf->io_cfg->direction != CAM_BUF_OUTPUT) {
+			CAM_ERR(CAM_LRME, "Incorrect direction %d %d",
+				io_buf->io_cfg->direction, CAM_BUF_OUTPUT);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_LRME, "resource_type %d",
+			io_buf->io_cfg->resource_type);
+		switch (io_buf->io_cfg->resource_type) {
+		case CAM_LRME_IO_TYPE_DS2:
+			cam_lrme_hw_util_fill_we_reg(io_buf, 0, reg_val_pair,
+				&num_cmd, hw_info);
+
+			output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_DS2;
+			break;
+		case CAM_LRME_IO_TYPE_RES:
+			cam_lrme_hw_util_fill_we_reg(io_buf, 1, reg_val_pair,
+				&num_cmd, hw_info);
+
+			output_res_mask |= CAM_LRME_OUTPUT_PORT_TYPE_RES;
+			break;
+
+		default:
+			CAM_ERR(CAM_LRME, "wrong resource_type %d",
+				io_buf->io_cfg->resource_type);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < CAM_LRME_BUS_WR_MAX_CLIENTS; i++)
+		if (!((output_res_mask >> i) & 0x1))
+			cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+				hw_info->bus_wr_reg.bus_client_reg[i].cfg, 0x0);
+
+	if (output_res_mask) {
+		/* write composite mask */
+		cam_lrme_cdm_write_reg_val_pair(reg_val_pair, &num_cmd,
+			hw_info->bus_wr_reg.common_reg.composite_mask_0,
+			output_res_mask);
+	}
+
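+	/*
+	 * Build the CDM command buffer in two steps: a changebase command
+	 * (so the following register offsets are relative to the LRME
+	 * base), then a reg_random command carrying the reg/value pairs.
+	 * cdm_required_size_*() returns dwords while available_size is in
+	 * bytes, hence the (size * 4) comparisons.
+	 */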
+	size = hw_cdm_info->cdm_ops->cdm_required_size_changebase();
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+			available_size, size * 4);
+		return -ENOMEM;
+	}
+
+	mem_base = CAM_SOC_GET_REG_MAP_CAM_BASE(soc_info, CAM_LRME_BASE_IDX);
+
+	hw_cdm_info->cdm_ops->cdm_write_changebase(cmd_buf_addr, mem_base);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	size = hw_cdm_info->cdm_ops->cdm_required_size_reg_random(
+		num_cmd / 2);
+
+	if ((size * 4) > available_size) {
+		CAM_ERR(CAM_LRME, "buf size:%d is not sufficient, expected: %d",
+			available_size, size * 4);
+		return -ENOMEM;
+	}
+
+	hw_cdm_info->cdm_ops->cdm_write_regrandom(cmd_buf_addr, num_cmd / 2,
+		reg_val_pair);
+	cmd_buf_addr += size;
+	available_size -= (size * 4);
+
+	config_args->config_buf_size =
+		config_args->size - available_size;
+
+	return 0;
+}
+
+static int cam_lrme_hw_util_submit_go(struct cam_hw_info *lrme_hw)
+{
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info   *hw_info;
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	hw_info = lrme_core->hw_info;
+	soc_info = &lrme_hw->soc_info;
+
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.cmd);
+
+	return 0;
+}
+
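+/*
+ * Reset the core and wait for the done irq. A HW reset only writes 0x1
+ * to top_rst_cmd; a SW reset first soft-resets both bus clients before
+ * writing 0x2 to top_rst_cmd. In both cases completion is signaled from
+ * the irq handler through reset_complete.
+ */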
+static int cam_lrme_hw_util_reset(struct cam_hw_info *lrme_hw,
+	uint32_t reset_type)
+{
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_hw_info *hw_info;
+	long time_left;
+
+	lrme_core = lrme_hw->core_info;
+	hw_info = lrme_core->hw_info;
+
+	switch (reset_type) {
+	case CAM_LRME_HW_RESET_TYPE_HW_RESET:
+		reinit_completion(&lrme_core->reset_complete);
+		cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_rst_cmd);
+		time_left = wait_for_completion_timeout(
+			&lrme_core->reset_complete,
+			msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+		if (time_left <= 0) {
+			CAM_ERR(CAM_LRME,
+				"HW reset wait failed time_left=%ld",
+				time_left);
+			return -ETIMEDOUT;
+		}
+		break;
+	case CAM_LRME_HW_RESET_TYPE_SW_RESET:
+		cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.sw_reset);
+		cam_io_w_mb(0x3, soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.sw_reset);
+		reinit_completion(&lrme_core->reset_complete);
+		cam_io_w_mb(0x2, soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_rst_cmd);
+		time_left = wait_for_completion_timeout(
+			&lrme_core->reset_complete,
+			msecs_to_jiffies(CAM_LRME_HW_RESET_TIMEOUT));
+		if (time_left <= 0) {
+			CAM_ERR(CAM_LRME,
+				"SW reset wait failed time_left=%ld",
+				time_left);
+			return -ETIMEDOUT;
+		}
+		break;
+	}
+
+	return 0;
+}
+
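+/*
+ * All version registers share one layout: generation in bits [31:28],
+ * revision in bits [27:16] and step in bits [15:0], extracted with
+ * CAM_BITS_MASK_SHIFT below.
+ */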
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_dev_cap *hw_caps)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_hw_info *hw_info =
+		((struct cam_lrme_core *)lrme_hw->core_info)->hw_info;
+	uint32_t reg_value;
+
+	if (!hw_info) {
+		CAM_ERR(CAM_LRME, "Invalid hw info data");
+		return -EINVAL;
+	}
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->clc_reg.clc_hw_version);
+	hw_caps->clc_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->clc_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->clc_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.hw_version);
+	hw_caps->bus_rd_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->bus_rd_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->bus_rd_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.hw_version);
+	hw_caps->bus_wr_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->bus_wr_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->bus_wr_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_hw_version);
+	hw_caps->top_hw_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->top_hw_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->top_hw_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	reg_value = cam_io_r_mb(soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_titan_version);
+	hw_caps->top_titan_version.gen =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xf0000000, 0x1C);
+	hw_caps->top_titan_version.rev =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xfff0000, 0x10);
+	hw_caps->top_titan_version.step =
+		CAM_BITS_MASK_SHIFT(reg_value, 0xffff, 0x0);
+
+	return 0;
+}
+
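+/*
+ * Hand the prepared hw update entries to the CDM as one bulk BL submit.
+ * Each entry is passed by mem handle plus offset/length, so no command
+ * data is copied here.
+ */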
+static int cam_lrme_hw_util_submit_req(struct cam_lrme_core *lrme_core,
+	struct cam_lrme_frame_request *frame_req)
+{
+	struct cam_lrme_cdm_info *hw_cdm_info =
+		lrme_core->hw_cdm_info;
+	struct cam_cdm_bl_request *cdm_cmd = hw_cdm_info->cdm_cmd;
+	struct cam_hw_update_entry *cmd;
+	int i, rc = 0;
+
+	if (frame_req->num_hw_update_entries > 0) {
+		cdm_cmd->cmd_arrary_count = frame_req->num_hw_update_entries;
+		cdm_cmd->type = CAM_CDM_BL_CMD_TYPE_MEM_HANDLE;
+		cdm_cmd->flag = false;
+		cdm_cmd->userdata = NULL;
+		cdm_cmd->cookie = 0;
+
+		for (i = 0; i < frame_req->num_hw_update_entries; i++) {
+			cmd = (frame_req->hw_update_entries + i);
+			cdm_cmd->cmd[i].bl_addr.mem_handle = cmd->handle;
+			cdm_cmd->cmd[i].offset = cmd->offset;
+			cdm_cmd->cmd[i].len = cmd->len;
+		}
+
+		rc = cam_cdm_submit_bls(hw_cdm_info->cdm_handle, cdm_cmd);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed to submit cdm commands");
+			return -EINVAL;
+		}
+	} else {
+		CAM_ERR(CAM_LRME, "No hw update entry");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_lrme_hw_util_flush_ctx(struct cam_hw_info *lrme_hw,
+	void *ctxt_to_hw_map)
+{
+	int rc = -ENODEV;
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_hw_cb_args cb_args;
+	struct cam_lrme_frame_request *req_proc, *req_submit;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "reset failed");
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+	req_proc = lrme_core->req_proc;
+	req_submit = lrme_core->req_submit;
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (req_submit && req_submit->ctxt_to_hw_map == ctxt_to_hw_map) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_submit;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+				lrme_core->hw_mgr_cb.data, &cb_args);
+	} else if (req_submit) {
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_submit;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	if (req_proc && req_proc->ctxt_to_hw_map == ctxt_to_hw_map) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_proc;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+				lrme_core->hw_mgr_cb.data, &cb_args);
+	} else if (req_proc) {
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_proc;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	return rc;
+}
+
+static int cam_lrme_hw_util_flush_req(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_frame_request *req_to_flush)
+{
+	int rc = -ENODEV;
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_hw_cb_args cb_args;
+	struct cam_lrme_frame_request *req_proc, *req_submit;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "reset failed");
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+	req_proc = lrme_core->req_proc;
+	req_submit = lrme_core->req_submit;
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (req_submit && req_submit == req_to_flush) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_submit;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+				lrme_core->hw_mgr_cb.data, &cb_args);
+	} else if (req_submit) {
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_submit);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_submit;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	if (req_proc && req_proc == req_to_flush) {
+		cb_args.cb_type = CAM_LRME_CB_PUT_FRAME;
+		cb_args.frame_req = req_proc;
+		if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb)
+			lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+				lrme_core->hw_mgr_cb.data, &cb_args);
+	} else if (req_proc) {
+		rc = cam_lrme_hw_util_submit_req(lrme_core, req_proc);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Submit failed");
+		lrme_core->req_submit = req_proc;
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+	}
+
+	return rc;
+}
+
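+/*
+ * Error recovery: dump the registers, reset the core, and return both
+ * the in-flight and the pending request to the HW manager through the
+ * error callback.
+ */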
+static int cam_lrme_hw_util_process_err(struct cam_hw_info *lrme_hw)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_frame_request *req_proc, *req_submit;
+	struct cam_lrme_hw_cb_args cb_args;
+	int rc;
+
+	req_proc = lrme_core->req_proc;
+	req_submit = lrme_core->req_submit;
+	cb_args.cb_type = CAM_LRME_CB_ERROR;
+
+	if ((lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) &&
+		(lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING) &&
+		(lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND)) {
+		CAM_ERR(CAM_LRME, "Get error irq in wrong state %d",
+			lrme_core->state);
+	}
+
+	cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
+	CAM_ERR_RATE_LIMIT(CAM_LRME, "Start recovery");
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed to reset");
+
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+	if (!rc)
+		lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	cb_args.frame_req = req_proc;
+	lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+		&cb_args);
+
+	cb_args.frame_req = req_submit;
+	lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(lrme_core->hw_mgr_cb.data,
+		&cb_args);
+
+	return rc;
+}
+
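+/*
+ * A reg update irq means the submitted request was latched by the
+ * hardware: move it from req_submit to req_proc and advance the state
+ * from REQ_PENDING to PROCESSING. A reg update in any other state
+ * triggers error recovery.
+ */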
+static int cam_lrme_hw_util_process_reg_update(
+	struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	int rc = 0;
+
+	cb_args->cb_type |= CAM_LRME_CB_COMP_REG_UPDATE;
+	if (lrme_core->state == CAM_LRME_CORE_STATE_REQ_PENDING) {
+		lrme_core->state = CAM_LRME_CORE_STATE_PROCESSING;
+	} else {
+		CAM_ERR(CAM_LRME, "Reg update in wrong state %d",
+			lrme_core->state);
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Failed to reset");
+		return -EINVAL;
+	}
+
+	lrme_core->req_proc = lrme_core->req_submit;
+	lrme_core->req_submit = NULL;
+
+	if (lrme_core->dump_flag)
+		cam_lrme_dump_registers(lrme_hw->soc_info.reg_map[0].mem_base);
+
+	return 0;
+}
+
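+/*
+ * An idle irq means the frame in req_proc finished. If a second request
+ * was queued while processing (REQ_PROC_PEND), issue its go command now;
+ * otherwise drop back to IDLE. Either way the finished request is
+ * reported through the buf done callback.
+ */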
+static int cam_lrme_hw_util_process_idle(
+	struct cam_hw_info *lrme_hw, struct cam_lrme_hw_cb_args *cb_args)
+{
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	int rc = 0;
+
+	cb_args->cb_type |= CAM_LRME_CB_BUF_DONE;
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_REQ_PROC_PEND:
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Idle in wrong state %d",
+			lrme_core->state);
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		return rc;
+	}
+	cb_args->frame_req = lrme_core->req_proc;
+	lrme_core->req_proc = NULL;
+
+	return 0;
+}
+
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw,
+	enum cam_lrme_irq_set set)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_core *lrme_core = lrme_hw->core_info;
+	struct cam_lrme_hw_info *hw_info = lrme_core->hw_info;
+
+	switch (set) {
+	case CAM_LRME_IRQ_ENABLE:
+		cam_io_w_mb(0xFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_irq_mask);
+		cam_io_w_mb(0xFFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_0);
+		cam_io_w_mb(0xFFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_1);
+		cam_io_w_mb(0xFFFFF,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.irq_mask);
+		break;
+
+	case CAM_LRME_IRQ_DISABLE:
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->titan_reg.top_irq_mask);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_0);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_wr_reg.common_reg.irq_mask_1);
+		cam_io_w_mb(0x0,
+			soc_info->reg_map[0].mem_base +
+			hw_info->bus_rd_reg.common_reg.irq_mask);
+		break;
+	}
+}
+
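+/*
+ * Bottom half, run in workqueue context: decode the statuses captured by
+ * cam_lrme_hw_irq() (top bit 3 = error, WE status0 bit 1 = reg update,
+ * top bit 4 = idle) and notify the HW manager callback, all under the hw
+ * mutex.
+ */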
+int cam_lrme_hw_process_irq(void *priv, void *data)
+{
+	struct cam_lrme_hw_work_data *work_data;
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	int rc = 0;
+	uint32_t top_irq_status, fe_irq_status;
+	uint32_t *we_irq_status;
+	struct cam_lrme_hw_cb_args cb_args;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_LRME, "Invalid data %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	memset(&cb_args, 0, sizeof(struct cam_lrme_hw_cb_args));
+	lrme_hw = (struct cam_hw_info *)priv;
+	work_data = (struct cam_lrme_hw_work_data *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	top_irq_status = work_data->top_irq_status;
+	fe_irq_status = work_data->fe_irq_status;
+	we_irq_status = work_data->we_irq_status;
+
+	CAM_DBG(CAM_LRME,
+		"top status %x, fe status %x, we status0 %x, we status1 %x",
+		top_irq_status, fe_irq_status, we_irq_status[0],
+		we_irq_status[1]);
+	CAM_DBG(CAM_LRME, "Current state %d", lrme_core->state);
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (top_irq_status & (1 << 3)) {
+		CAM_DBG(CAM_LRME, "Error");
+		rc = cam_lrme_hw_util_process_err(lrme_hw);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Process error failed");
+		goto end;
+	}
+
+	if (we_irq_status[0] & (1 << 1)) {
+		CAM_DBG(CAM_LRME, "reg update");
+		rc = cam_lrme_hw_util_process_reg_update(lrme_hw, &cb_args);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Process reg_update failed");
+			goto end;
+		}
+	}
+
+	if (top_irq_status & (1 << 4)) {
+		CAM_DBG(CAM_LRME, "IDLE");
+		if (!lrme_core->req_proc) {
+			CAM_DBG(CAM_LRME, "No frame request to process idle");
+			goto end;
+		}
+		rc = cam_lrme_hw_util_process_idle(lrme_hw, &cb_args);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Process idle failed");
+			goto end;
+		}
+	}
+
+	if (lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb) {
+		lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb(
+			lrme_core->hw_mgr_cb.data, &cb_args);
+	} else {
+		CAM_ERR(CAM_LRME, "No hw mgr cb");
+		rc = -EINVAL;
+	}
+
+end:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+int cam_lrme_hw_start(void *hw_priv, void *hw_start_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+	struct cam_lrme_core *lrme_core;
+
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME,
+			"Invalid input params, lrme_hw %pK",
+			lrme_hw);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count > 0) {
+		lrme_hw->open_count++;
+		CAM_DBG(CAM_LRME, "This device is activated before");
+		goto unlock;
+	}
+
+	rc = cam_lrme_soc_enable_resources(lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to enable soc resources");
+		goto unlock;
+	}
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, CAM_LRME_HW_RESET_TYPE_HW_RESET);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to reset hw");
+		goto disable_soc;
+	}
+
+	if (lrme_core->hw_cdm_info) {
+		struct cam_lrme_cdm_info *hw_cdm_info =
+			lrme_core->hw_cdm_info;
+
+		rc = cam_cdm_stream_on(hw_cdm_info->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_LRME, "Failed to stream on cdm");
+			goto disable_soc;
+		}
+	}
+
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_UP;
+	lrme_hw->open_count++;
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+
+disable_soc:
+	if (cam_lrme_soc_disable_resources(lrme_hw))
+		CAM_ERR(CAM_LRME, "Error in disable soc resources");
+unlock:
+	CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+int cam_lrme_hw_stop(void *hw_priv, void *hw_stop_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+	struct cam_lrme_core *lrme_core;
+
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "Invalid argument");
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0 ||
+		lrme_hw->hw_state == CAM_HW_STATE_POWER_DOWN) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Error Unbalanced stop");
+		return -EINVAL;
+	}
+	lrme_hw->open_count--;
+
+	CAM_DBG(CAM_LRME, "open count %d", lrme_hw->open_count);
+
+	if (lrme_hw->open_count)
+		goto unlock;
+
+	lrme_core->req_proc = NULL;
+	lrme_core->req_submit = NULL;
+
+	if (lrme_core->hw_cdm_info) {
+		struct cam_lrme_cdm_info *hw_cdm_info =
+			lrme_core->hw_cdm_info;
+
+		rc = cam_cdm_stream_off(hw_cdm_info->cdm_handle);
+		if (rc) {
+			CAM_ERR(CAM_LRME,
+				"Failed in CDM StreamOff, handle=0x%x, rc=%d",
+				hw_cdm_info->cdm_handle, rc);
+			goto unlock;
+		}
+	}
+
+	rc = cam_lrme_soc_disable_resources(lrme_hw);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed in Disable SOC, rc=%d", rc);
+
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	if (lrme_core->state == CAM_LRME_CORE_STATE_IDLE) {
+		lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+	} else {
+		CAM_ERR(CAM_LRME, "HW in wrong state %d", lrme_core->state);
+		rc = -EINVAL;
+	}
+
+unlock:
+	mutex_unlock(&lrme_hw->hw_mutex);
+	return rc;
+}
+
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_submit_args *args =
+		(struct cam_lrme_hw_submit_args *)hw_submit_args;
+	int rc = 0;
+	struct cam_lrme_frame_request *frame_req;
+
+	if (!hw_priv || !hw_submit_args) {
+		CAM_ERR(CAM_LRME, "Invalid input");
+		return -EINVAL;
+	}
+
+	if (sizeof(struct cam_lrme_hw_submit_args) != arg_size) {
+		CAM_ERR(CAM_LRME,
+			"size of args %zu, arg_size %d",
+			sizeof(struct cam_lrme_hw_submit_args), arg_size);
+		return -EINVAL;
+	}
+
+	frame_req = args->frame_req;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_hw->open_count == 0) {
+		CAM_ERR(CAM_LRME, "HW is not open");
+		mutex_unlock(&lrme_hw->hw_mutex);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (lrme_core->state != CAM_LRME_CORE_STATE_IDLE &&
+		lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "device busy, can not submit, state %d",
+			lrme_core->state);
+		return -EBUSY;
+	}
+
+	if (lrme_core->req_submit != NULL) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "req_submit is not NULL");
+		return -EBUSY;
+	}
+
+	rc = cam_lrme_hw_util_submit_req(lrme_core, frame_req);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Submit req failed");
+		goto error;
+	}
+
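+	/*
+	 * The core pipelines up to two requests: from IDLE, issue the go
+	 * command right away (REQ_PENDING); while PROCESSING, only queue
+	 * the request and defer the go command to the idle irq handler
+	 * (REQ_PROC_PEND).
+	 */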
+	switch (lrme_core->state) {
+	case CAM_LRME_CORE_STATE_PROCESSING:
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PROC_PEND;
+		break;
+
+	case CAM_LRME_CORE_STATE_IDLE:
+		cam_lrme_hw_util_submit_go(lrme_hw);
+		lrme_core->state = CAM_LRME_CORE_STATE_REQ_PENDING;
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Wrong hw state");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	lrme_core->req_submit = frame_req;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+	CAM_DBG(CAM_LRME, "Release lock, submit done for req %llu",
+		frame_req->req_id);
+
+	return 0;
+
+error:
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+
+}
+
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = hw_priv;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_hw_reset_args *lrme_reset_args = reset_core_args;
+	int rc;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid input args");
+		return -EINVAL;
+	}
+
+	if (!reset_core_args ||
+		sizeof(struct cam_lrme_hw_reset_args) != arg_size) {
+		CAM_ERR(CAM_LRME, "Invalid reset args");
+		return -EINVAL;
+	}
+
+	lrme_core = lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+	if (lrme_core->state == CAM_LRME_CORE_STATE_RECOVERY) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_LRME, "Reset not allowed in %d state",
+			lrme_core->state);
+		return -EINVAL;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_RECOVERY;
+
+	rc = cam_lrme_hw_util_reset(lrme_hw, lrme_reset_args->reset_type);
+	if (rc) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_ERR(CAM_FD, "Failed to reset");
+		return rc;
+	}
+
+	lrme_core->state = CAM_LRME_CORE_STATE_IDLE;
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return 0;
+}
+
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size)
+{
+	struct cam_lrme_core         *lrme_core = NULL;
+	struct cam_hw_info           *lrme_hw = hw_priv;
+	struct cam_lrme_hw_flush_args *flush_args =
+		(struct cam_lrme_hw_flush_args *)hw_flush_args;
+	int rc = -ENODEV;
+
+	if (!hw_priv) {
+		CAM_ERR(CAM_LRME, "Invalid arguments %pK", hw_priv);
+		return -EINVAL;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+
+	mutex_lock(&lrme_hw->hw_mutex);
+
+	if (lrme_core->state != CAM_LRME_CORE_STATE_PROCESSING &&
+		lrme_core->state != CAM_LRME_CORE_STATE_REQ_PENDING &&
+		lrme_core->state != CAM_LRME_CORE_STATE_REQ_PROC_PEND) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "Flush is not needed in %d state",
+			lrme_core->state);
+		return 0;
+	}
+
+	if (!lrme_core->req_proc && !lrme_core->req_submit) {
+		mutex_unlock(&lrme_hw->hw_mutex);
+		CAM_DBG(CAM_LRME, "no req in device");
+		return 0;
+	}
+
+	switch (flush_args->flush_type) {
+	case CAM_FLUSH_TYPE_ALL:
+		if ((!lrme_core->req_submit ||
+			lrme_core->req_submit->ctxt_to_hw_map !=
+			flush_args->ctxt_to_hw_map) &&
+			(!lrme_core->req_proc ||
+			lrme_core->req_proc->ctxt_to_hw_map !=
+			flush_args->ctxt_to_hw_map)) {
+			mutex_unlock(&lrme_hw->hw_mutex);
+			CAM_DBG(CAM_LRME, "hw running on different ctx");
+			return 0;
+		}
+		rc = cam_lrme_hw_util_flush_ctx(lrme_hw,
+			flush_args->ctxt_to_hw_map);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Flush all failed");
+		break;
+
+	case CAM_FLUSH_TYPE_REQ:
+		if ((!lrme_core->req_submit ||
+			lrme_core->req_submit != flush_args->req_to_flush) &&
+			(!lrme_core->req_proc ||
+			lrme_core->req_proc != flush_args->req_to_flush)) {
+			mutex_unlock(&lrme_hw->hw_mutex);
+			CAM_DBG(CAM_LRME, "hw running on different ctx");
+			return 0;
+		}
+		rc = cam_lrme_hw_util_flush_req(lrme_hw,
+			flush_args->req_to_flush);
+		if (rc)
+			CAM_ERR(CAM_LRME, "Flush req failed");
+		break;
+
+	default:
+		CAM_ERR(CAM_LRME, "Unsupported flush type");
+		break;
+	}
+
+	mutex_unlock(&lrme_hw->hw_mutex);
+
+	return rc;
+}
+
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	struct cam_lrme_dev_cap *lrme_hw_caps =
+		(struct cam_lrme_dev_cap *)get_hw_cap_args;
+
+	if (!hw_priv || !get_hw_cap_args) {
+		CAM_ERR(CAM_LRME, "Invalid input pointers %pK %pK",
+			hw_priv, get_hw_cap_args);
+		return -EINVAL;
+	}
+
+	lrme_hw = (struct cam_hw_info *)hw_priv;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	*lrme_hw_caps = lrme_core->hw_caps;
+
+	return 0;
+}
+
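+/*
+ * Top half: read and clear all four irq status registers, ack them via
+ * the irq_cmd registers, complete reset_complete when top bit 0 (reset
+ * done) is set, and defer any remaining status bits to the workqueue,
+ * which runs cam_lrme_hw_process_irq().
+ */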
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+	struct cam_hw_soc_info *soc_info;
+	struct cam_lrme_hw_info   *hw_info;
+	struct crm_workq_task *task;
+	struct cam_lrme_hw_work_data *work_data;
+	uint32_t top_irq_status, fe_irq_status, we_irq_status0, we_irq_status1;
+	int rc;
+
+	if (!data) {
+		CAM_ERR(CAM_LRME, "Invalid data in IRQ callback");
+		return IRQ_NONE;
+	}
+
+	lrme_hw = (struct cam_hw_info *)data;
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	soc_info = &lrme_hw->soc_info;
+	hw_info = lrme_core->hw_info;
+
+	top_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_status);
+	CAM_DBG(CAM_LRME, "top_irq_status %x", top_irq_status);
+	cam_io_w_mb(top_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_clear);
+	top_irq_status &= CAM_LRME_TOP_IRQ_MASK;
+
+	fe_irq_status = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_status);
+	CAM_DBG(CAM_LRME, "fe_irq_status %x", fe_irq_status);
+	cam_io_w_mb(fe_irq_status,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_clear);
+	fe_irq_status &= CAM_LRME_FE_IRQ_MASK;
+
+	we_irq_status0 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_0);
+	CAM_DBG(CAM_LRME, "we_irq_status[0] %x", we_irq_status0);
+	cam_io_w_mb(we_irq_status0,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_0);
+	we_irq_status0 &= CAM_LRME_WE_IRQ_MASK_0;
+
+	we_irq_status1 = cam_io_r_mb(
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_status_1);
+	CAM_DBG(CAM_LRME, "we_irq_status[1] %x", we_irq_status1);
+	cam_io_w_mb(we_irq_status1,
+		soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_clear_1);
+	we_irq_status1 &= CAM_LRME_WE_IRQ_MASK_1;
+
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->titan_reg.top_irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_wr_reg.common_reg.irq_cmd);
+	cam_io_w_mb(0x1, soc_info->reg_map[0].mem_base +
+		hw_info->bus_rd_reg.common_reg.irq_cmd);
+
+	if (top_irq_status & 0x1) {
+		complete(&lrme_core->reset_complete);
+		top_irq_status &= (~0x1);
+	}
+
+	if (top_irq_status || fe_irq_status ||
+		we_irq_status0 || we_irq_status1) {
+		task = cam_req_mgr_workq_get_task(lrme_core->work);
+		if (!task) {
+			CAM_ERR(CAM_LRME, "no empty task available");
+			return IRQ_NONE;
+		}
+		work_data = (struct cam_lrme_hw_work_data *)task->payload;
+		work_data->top_irq_status = top_irq_status;
+		work_data->fe_irq_status = fe_irq_status;
+		work_data->we_irq_status[0] = we_irq_status0;
+		work_data->we_irq_status[1] = we_irq_status1;
+		task->process_cb = cam_lrme_hw_process_irq;
+		rc = cam_req_mgr_workq_enqueue_task(task, data,
+			CRM_TASK_PRIORITY_0);
+		if (rc)
+			CAM_ERR(CAM_LRME,
+				"Failed in enqueue work task, rc=%d", rc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size)
+{
+	struct cam_hw_info *lrme_hw = (struct cam_hw_info *)hw_priv;
+	int rc = 0;
+
+	switch (cmd_type) {
+	case CAM_LRME_HW_CMD_PREPARE_HW_UPDATE: {
+		struct cam_lrme_hw_cmd_config_args *config_args;
+
+		config_args = (struct cam_lrme_hw_cmd_config_args *)cmd_args;
+		rc = cam_lrme_hw_util_process_config_hw(lrme_hw, config_args);
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_REGISTER_CB: {
+		struct cam_lrme_hw_cmd_set_cb *cb_args;
+		struct cam_lrme_core *lrme_core =
+			(struct cam_lrme_core *)lrme_hw->core_info;
+
+		cb_args = (struct cam_lrme_hw_cmd_set_cb *)cmd_args;
+		lrme_core->hw_mgr_cb.cam_lrme_hw_mgr_cb =
+			cb_args->cam_lrme_hw_mgr_cb;
+		lrme_core->hw_mgr_cb.data = cb_args->data;
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_SUBMIT: {
+		struct cam_lrme_hw_submit_args *submit_args;
+
+		submit_args = (struct cam_lrme_hw_submit_args *)cmd_args;
+		rc = cam_lrme_hw_submit_req(hw_priv,
+			submit_args, arg_size);
+		break;
+	}
+
+	case CAM_LRME_HW_CMD_DUMP_REGISTER: {
+		struct cam_lrme_core *lrme_core =
+			(struct cam_lrme_core *)lrme_hw->core_info;
+		lrme_core->dump_flag = *(bool *)cmd_args;
+		CAM_DBG(CAM_LRME, "dump_flag %d", lrme_core->dump_flag);
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
new file mode 100644
index 0000000..219cf13
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_core.h
@@ -0,0 +1,451 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_LRME_HW_CORE_H_
+#define _CAM_LRME_HW_CORE_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_defs.h>
+#include <media/cam_lrme.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+#include "cam_io_util.h"
+#include "cam_cpas_api.h"
+#include "cam_cdm_intf_api.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_req_mgr_workq.h"
+
+#define CAM_LRME_HW_RESET_TIMEOUT 3000
+
+#define CAM_LRME_BUS_RD_MAX_CLIENTS 2
+#define CAM_LRME_BUS_WR_MAX_CLIENTS 2
+
+#define CAM_LRME_HW_WORKQ_NUM_TASK 30
+
+#define CAM_LRME_TOP_IRQ_MASK          0x19
+#define CAM_LRME_WE_IRQ_MASK_0         0x2
+#define CAM_LRME_WE_IRQ_MASK_1         0x0
+#define CAM_LRME_FE_IRQ_MASK           0x0
+
+#define CAM_LRME_MAX_REG_PAIR_NUM 60
+
+/**
+ * enum cam_lrme_irq_set
+ *
+ * @CAM_LRME_IRQ_ENABLE  : Enable irqs
+ * @CAM_LRME_IRQ_DISABLE : Disable irqs
+ */
+enum cam_lrme_irq_set {
+	CAM_LRME_IRQ_ENABLE,
+	CAM_LRME_IRQ_DISABLE,
+};
+
+/**
+ * struct cam_lrme_cdm_info : information used to submit cdm command
+ *
+ * @cdm_handle      : CDM handle for this device
+ * @cdm_ops         : CDM ops
+ * @cdm_cmd         : CDM command pointer
+ */
+struct cam_lrme_cdm_info {
+	uint32_t                   cdm_handle;
+	struct cam_cdm_utils_ops  *cdm_ops;
+	struct cam_cdm_bl_request *cdm_cmd;
+};
+
+/**
+ * struct cam_lrme_hw_work_data : Work data for HW work queue
+ *
+ * @top_irq_status : Top registers irq status
+ * @fe_irq_status  : FE engine irq status
+ * @we_irq_status  : WE engine irq status
+ */
+struct cam_lrme_hw_work_data {
+	uint32_t                          top_irq_status;
+	uint32_t                          fe_irq_status;
+	uint32_t                          we_irq_status[2];
+};
+
+/**
+ *  enum cam_lrme_core_state : LRME core states
+ *
+ * @CAM_LRME_CORE_STATE_UNINIT        : LRME is in uninit state
+ * @CAM_LRME_CORE_STATE_INIT          : LRME is in init state after probe
+ * @CAM_LRME_CORE_STATE_IDLE          : LRME is in idle state. Hardware is in
+ *                                      this state when no frame is being
+ *                                      processed or pending on this core.
+ * @CAM_LRME_CORE_STATE_REQ_PENDING   : LRME is in pending state. One frame is
+ *                                      waiting for processing
+ * @CAM_LRME_CORE_STATE_PROCESSING    : LRME is in processing state. HW manager
+ *                                      can submit one more frame to HW
+ * @CAM_LRME_CORE_STATE_REQ_PROC_PEND : Two frames are inside the HW: one
+ *                                      processing, one pending
+ * @CAM_LRME_CORE_STATE_RECOVERY      : Core is being reset
+ * @CAM_LRME_CORE_STATE_MAX           : upper limit of states
+ */
+enum cam_lrme_core_state {
+	CAM_LRME_CORE_STATE_UNINIT,
+	CAM_LRME_CORE_STATE_INIT,
+	CAM_LRME_CORE_STATE_IDLE,
+	CAM_LRME_CORE_STATE_REQ_PENDING,
+	CAM_LRME_CORE_STATE_PROCESSING,
+	CAM_LRME_CORE_STATE_REQ_PROC_PEND,
+	CAM_LRME_CORE_STATE_RECOVERY,
+	CAM_LRME_CORE_STATE_MAX,
+};
+
+/**
+ *  struct cam_lrme_core : LRME HW core information
+ *
+ * @hw_info        : Pointer to base HW information structure
+ * @device_iommu   : Device iommu handle
+ * @cdm_iommu      : CDM iommu handle
+ * @hw_caps        : Hardware capabilities
+ * @state          : Hardware state
+ * @reset_complete : Reset completion
+ * @work           : Hardware workqueue to handle irq events
+ * @work_data      : Work data used by hardware workqueue
+ * @hw_mgr_cb      : Hw manager callback
+ * @req_proc       : Pointer to the processing frame request
+ * @req_submit     : Pointer to the frame request waiting for processing
+ * @hw_cdm_info    : CDM information used by this device
+ * @hw_idx         : Hardware index
+ * @dump_flag      : If set, registers are dumped on each reg update
+ */
+struct cam_lrme_core {
+	struct cam_lrme_hw_info          *hw_info;
+	struct cam_iommu_handle           device_iommu;
+	struct cam_iommu_handle           cdm_iommu;
+	struct cam_lrme_dev_cap           hw_caps;
+	enum cam_lrme_core_state          state;
+	struct completion                 reset_complete;
+	struct cam_req_mgr_core_workq    *work;
+	struct cam_lrme_hw_work_data      work_data[CAM_LRME_HW_WORKQ_NUM_TASK];
+	struct cam_lrme_hw_cmd_set_cb     hw_mgr_cb;
+	struct cam_lrme_frame_request    *req_proc;
+	struct cam_lrme_frame_request    *req_submit;
+	struct cam_lrme_cdm_info         *hw_cdm_info;
+	uint32_t                          hw_idx;
+	bool                              dump_flag;
+};
+
+/**
+ * struct cam_lrme_bus_rd_reg_common : Offsets of FE common registers
+ *
+ * @hw_version    : Offset of hw_version register
+ * @hw_capability : Offset of hw_capability register
+ * @sw_reset      : Offset of sw_reset register
+ * @cgc_override  : Offset of cgc_override register
+ * @irq_mask      : Offset of irq_mask register
+ * @irq_clear     : Offset of irq_clear register
+ * @irq_cmd       : Offset of irq_cmd register
+ * @irq_status    : Offset of irq_status register
+ * @cmd           : Offset of cmd register
+ * @irq_set       : Offset of irq_set register
+ * @misr_reset    : Offset of misr_reset register
+ * @security_cfg  : Offset of security_cfg register
+ * @pwr_iso_cfg   : Offset of pwr_iso_cfg register
+ * @pwr_iso_seed  : Offset of pwr_iso_seed register
+ * @test_bus_ctrl : Offset of test_bus_ctrl register
+ * @spare         : Offset of spare register
+ */
+struct cam_lrme_bus_rd_reg_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t irq_mask;
+	uint32_t irq_clear;
+	uint32_t irq_cmd;
+	uint32_t irq_status;
+	uint32_t cmd;
+	uint32_t irq_set;
+	uint32_t misr_reset;
+	uint32_t security_cfg;
+	uint32_t pwr_iso_cfg;
+	uint32_t pwr_iso_seed;
+	uint32_t test_bus_ctrl;
+	uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_wr_reg_common : Offset of WE common registers
+ * @hw_version        : Offset of hw_version register
+ * @hw_capability     : Offset of hw_capability register
+ * @sw_reset          : Offset of sw_reset register
+ * @cgc_override      : Offset of cgc_override register
+ * @misr_reset        : Offset of misr_reset register
+ * @pwr_iso_cfg       : Offset of pwr_iso_cfg register
+ * @test_bus_ctrl     : Offset of test_bus_ctrl register
+ * @composite_mask_0  : Offset of composite_mask_0 register
+ * @irq_mask_0        : Offset of irq_mask_0 register
+ * @irq_mask_1        : Offset of irq_mask_1 register
+ * @irq_clear_0       : Offset of irq_clear_0 register
+ * @irq_clear_1       : Offset of irq_clear_1 register
+ * @irq_status_0      : Offset of irq_status_0 register
+ * @irq_status_1      : Offset of irq_status_1 register
+ * @irq_cmd           : Offset of irq_cmd register
+ * @irq_set_0         : Offset of irq_set_0 register
+ * @irq_set_1         : Offset of irq_set_1 register
+ * @addr_fifo_status  : Offset of addr_fifo_status register
+ * @frame_header_cfg0 : Offset of frame_header_cfg0 register
+ * @frame_header_cfg1 : Offset of frame_header_cfg1 register
+ * @spare             : Offset of spare register
+ */
+struct cam_lrme_bus_wr_reg_common {
+	uint32_t hw_version;
+	uint32_t hw_capability;
+	uint32_t sw_reset;
+	uint32_t cgc_override;
+	uint32_t misr_reset;
+	uint32_t pwr_iso_cfg;
+	uint32_t test_bus_ctrl;
+	uint32_t composite_mask_0;
+	uint32_t irq_mask_0;
+	uint32_t irq_mask_1;
+	uint32_t irq_clear_0;
+	uint32_t irq_clear_1;
+	uint32_t irq_status_0;
+	uint32_t irq_status_1;
+	uint32_t irq_cmd;
+	uint32_t irq_set_0;
+	uint32_t irq_set_1;
+	uint32_t addr_fifo_status;
+	uint32_t frame_header_cfg0;
+	uint32_t frame_header_cfg1;
+	uint32_t spare;
+};
+
+/**
+ * struct cam_lrme_bus_rd_bus_client : Offset of FE registers
+ *
+ * @core_cfg                : Offset of core_cfg register
+ * @ccif_meta_data          : Offset of ccif_meta_data register
+ * @addr_image              : Offset of addr_image register
+ * @rd_buffer_size          : Offset of rd_buffer_size register
+ * @rd_stride               : Offset of rd_stride register
+ * @unpack_cfg_0            : Offset of unpack_cfg_0 register
+ * @latency_buff_allocation : Offset of latency_buff_allocation register
+ * @burst_limit_cfg         : Offset of burst_limit_cfg register
+ * @misr_cfg_0              : Offset of misr_cfg_0 register
+ * @misr_cfg_1              : Offset of misr_cfg_1 register
+ * @misr_rd_val             : Offset of misr_rd_val register
+ * @debug_status_cfg        : Offset of debug_status_cfg register
+ * @debug_status_0          : Offset of debug_status_0 register
+ * @debug_status_1          : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_rd_bus_client {
+	uint32_t core_cfg;
+	uint32_t ccif_meta_data;
+	uint32_t addr_image;
+	uint32_t rd_buffer_size;
+	uint32_t rd_stride;
+	uint32_t unpack_cfg_0;
+	uint32_t latency_buff_allocation;
+	uint32_t burst_limit_cfg;
+	uint32_t misr_cfg_0;
+	uint32_t misr_cfg_1;
+	uint32_t misr_rd_val;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_wr_bus_client : Offset of WE registers
+ *
+ * @status_0                  : Offset of status_0 register
+ * @status_1                  : Offset of status_1 register
+ * @cfg                       : Offset of cfg register
+ * @addr_frame_header         : Offset of addr_frame_header register
+ * @frame_header_cfg          : Offset of frame_header_cfg register
+ * @addr_image                : Offset of addr_image register
+ * @addr_image_offset         : Offset of addr_image_offset register
+ * @buffer_width_cfg          : Offset of buffer_width_cfg register
+ * @buffer_height_cfg         : Offset of buffer_height_cfg register
+ * @packer_cfg                : Offset of packer_cfg register
+ * @wr_stride                 : Offset of wr_stride register
+ * @irq_subsample_cfg_period  : Offset of irq_subsample_cfg_period register
+ * @irq_subsample_cfg_pattern : Offset of irq_subsample_cfg_pattern register
+ * @burst_limit_cfg           : Offset of burst_limit_cfg register
+ * @misr_cfg                  : Offset of misr_cfg register
+ * @misr_rd_word_sel          : Offset of misr_rd_word_sel register
+ * @misr_val                  : Offset of misr_val register
+ * @debug_status_cfg          : Offset of debug_status_cfg register
+ * @debug_status_0            : Offset of debug_status_0 register
+ * @debug_status_1            : Offset of debug_status_1 register
+ */
+struct cam_lrme_bus_wr_bus_client {
+	uint32_t status_0;
+	uint32_t status_1;
+	uint32_t cfg;
+	uint32_t addr_frame_header;
+	uint32_t frame_header_cfg;
+	uint32_t addr_image;
+	uint32_t addr_image_offset;
+	uint32_t buffer_width_cfg;
+	uint32_t buffer_height_cfg;
+	uint32_t packer_cfg;
+	uint32_t wr_stride;
+	uint32_t irq_subsample_cfg_period;
+	uint32_t irq_subsample_cfg_pattern;
+	uint32_t burst_limit_cfg;
+	uint32_t misr_cfg;
+	uint32_t misr_rd_word_sel;
+	uint32_t misr_val;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+};
+
+/**
+ * struct cam_lrme_bus_rd_hw_info : FE registers information
+ *
+ * @common_reg     : FE common register
+ * @bus_client_reg : List of FE bus registers information
+ */
+struct cam_lrme_bus_rd_hw_info {
+	struct cam_lrme_bus_rd_reg_common common_reg;
+	struct cam_lrme_bus_rd_bus_client
+		bus_client_reg[CAM_LRME_BUS_RD_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_bus_wr_hw_info : WE engine registers information
+ *
+ * @common_reg     : WE common register
+ * @bus_client_reg : List of WE bus registers information
+ */
+struct cam_lrme_bus_wr_hw_info {
+	struct cam_lrme_bus_wr_reg_common common_reg;
+	struct cam_lrme_bus_wr_bus_client
+		bus_client_reg[CAM_LRME_BUS_WR_MAX_CLIENTS];
+};
+
+/**
+ * struct cam_lrme_clc_reg : Offset of clc registers
+ *
+ * @clc_hw_version                 : Offset of clc_hw_version register
+ * @clc_hw_status                  : Offset of clc_hw_status register
+ * @clc_hw_status_dbg              : Offset of clc_hw_status_dbg register
+ * @clc_module_cfg                 : Offset of clc_module_cfg register
+ * @clc_moduleformat               : Offset of clc_moduleformat register
+ * @clc_rangestep                  : Offset of clc_rangestep register
+ * @clc_offset                     : Offset of clc_offset register
+ * @clc_maxallowedsad              : Offset of clc_maxallowedsad register
+ * @clc_minallowedtarmad           : Offset of clc_minallowedtarmad register
+ * @clc_meaningfulsaddiff          : Offset of clc_meaningfulsaddiff register
+ * @clc_minsaddiffdenom            : Offset of clc_minsaddiffdenom register
+ * @clc_robustnessmeasuredistmap_0 : Offset of measuredistmap_0 register
+ * @clc_robustnessmeasuredistmap_1 : Offset of measuredistmap_1 register
+ * @clc_robustnessmeasuredistmap_2 : Offset of measuredistmap_2 register
+ * @clc_robustnessmeasuredistmap_3 : Offset of measuredistmap_3 register
+ * @clc_robustnessmeasuredistmap_4 : Offset of measuredistmap_4 register
+ * @clc_robustnessmeasuredistmap_5 : Offset of measuredistmap_5 register
+ * @clc_robustnessmeasuredistmap_6 : Offset of measuredistmap_6 register
+ * @clc_robustnessmeasuredistmap_7 : Offset of measuredistmap_7 register
+ * @clc_ds_crop_horizontal         : Offset of clc_ds_crop_horizontal register
+ * @clc_ds_crop_vertical           : Offset of clc_ds_crop_vertical register
+ * @clc_tar_pd_unpacker            : Offset of clc_tar_pd_unpacker register
+ * @clc_ref_pd_unpacker            : Offset of clc_ref_pd_unpacker register
+ * @clc_sw_override                : Offset of clc_sw_override register
+ * @clc_tar_height                 : Offset of clc_tar_height register
+ * @clc_test_bus_ctrl              : Offset of clc_test_bus_ctrl register
+ * @clc_spare                      : Offset of clc_spare register
+ */
+struct cam_lrme_clc_reg {
+	uint32_t clc_hw_version;
+	uint32_t clc_hw_status;
+	uint32_t clc_hw_status_dbg;
+	uint32_t clc_module_cfg;
+	uint32_t clc_moduleformat;
+	uint32_t clc_rangestep;
+	uint32_t clc_offset;
+	uint32_t clc_maxallowedsad;
+	uint32_t clc_minallowedtarmad;
+	uint32_t clc_meaningfulsaddiff;
+	uint32_t clc_minsaddiffdenom;
+	uint32_t clc_robustnessmeasuredistmap_0;
+	uint32_t clc_robustnessmeasuredistmap_1;
+	uint32_t clc_robustnessmeasuredistmap_2;
+	uint32_t clc_robustnessmeasuredistmap_3;
+	uint32_t clc_robustnessmeasuredistmap_4;
+	uint32_t clc_robustnessmeasuredistmap_5;
+	uint32_t clc_robustnessmeasuredistmap_6;
+	uint32_t clc_robustnessmeasuredistmap_7;
+	uint32_t clc_ds_crop_horizontal;
+	uint32_t clc_ds_crop_vertical;
+	uint32_t clc_tar_pd_unpacker;
+	uint32_t clc_ref_pd_unpacker;
+	uint32_t clc_sw_override;
+	uint32_t clc_tar_height;
+	uint32_t clc_ref_height;
+	uint32_t clc_test_bus_ctrl;
+	uint32_t clc_spare;
+};
+
+/**
+ * struct cam_lrme_titan_reg : Offset of LRME top registers
+ *
+ * @top_hw_version           : Offset of top_hw_version register
+ * @top_titan_version        : Offset of top_titan_version register
+ * @top_rst_cmd              : Offset of top_rst_cmd register
+ * @top_core_clk_cfg         : Offset of top_core_clk_cfg register
+ * @top_irq_status           : Offset of top_irq_status register
+ * @top_irq_mask             : Offset of top_irq_mask register
+ * @top_irq_clear            : Offset of top_irq_clear register
+ * @top_irq_set              : Offset of top_irq_set register
+ * @top_irq_cmd              : Offset of top_irq_cmd register
+ * @top_violation_status     : Offset of top_violation_status register
+ * @top_spare                : Offset of top_spare register
+ */
+struct cam_lrme_titan_reg {
+	uint32_t top_hw_version;
+	uint32_t top_titan_version;
+	uint32_t top_rst_cmd;
+	uint32_t top_core_clk_cfg;
+	uint32_t top_irq_status;
+	uint32_t top_irq_mask;
+	uint32_t top_irq_clear;
+	uint32_t top_irq_set;
+	uint32_t top_irq_cmd;
+	uint32_t top_violation_status;
+	uint32_t top_spare;
+};
+
+/**
+ * struct cam_lrme_hw_info : LRME registers information
+ *
+ * @clc_reg    : LRME CLC registers
+ * @bus_rd_reg : LRME FE registers
+ * @bus_wr_reg : LRME WE registers
+ * @titan_reg  : LRME top registers
+ */
+struct cam_lrme_hw_info {
+	struct cam_lrme_clc_reg clc_reg;
+	struct cam_lrme_bus_rd_hw_info bus_rd_reg;
+	struct cam_lrme_bus_wr_hw_info bus_wr_reg;
+	struct cam_lrme_titan_reg titan_reg;
+};
+
+int cam_lrme_hw_process_irq(void *priv, void *data);
+int cam_lrme_hw_submit_req(void *hw_priv, void *hw_submit_args,
+	uint32_t arg_size);
+int cam_lrme_hw_reset(void *hw_priv, void *reset_core_args, uint32_t arg_size);
+int cam_lrme_hw_stop(void *hw_priv, void *stop_args, uint32_t arg_size);
+int cam_lrme_hw_get_caps(void *hw_priv, void *get_hw_cap_args,
+	uint32_t arg_size);
+irqreturn_t cam_lrme_hw_irq(int irq_num, void *data);
+int cam_lrme_hw_process_cmd(void *hw_priv, uint32_t cmd_type,
+	void *cmd_args, uint32_t arg_size);
+int cam_lrme_hw_util_get_caps(struct cam_hw_info *lrme_hw,
+	struct cam_lrme_dev_cap *hw_caps);
+int cam_lrme_hw_start(void *hw_priv, void *hw_init_args, uint32_t arg_size);
+int cam_lrme_hw_flush(void *hw_priv, void *hw_flush_args, uint32_t arg_size);
+void cam_lrme_set_irq(struct cam_hw_info *lrme_hw, enum cam_lrme_irq_set set);
+
+#endif /* _CAM_LRME_HW_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
new file mode 100644
index 0000000..e390e54
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_dev.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_req_mgr.h>
+
+#include "cam_subdev.h"
+#include "cam_lrme_hw_intf.h"
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+#include "cam_lrme_hw_reg.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_lrme_hw_mgr.h"
+#include "cam_mem_mgr_api.h"
+#include "cam_smmu_api.h"
+
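+/*
+ * Acquire a virtual CDM for this core. The BL request is sized for up to
+ * CAM_LRME_MAX_HW_ENTRIES commands, and every register map of the device
+ * is passed to the CDM as a base array entry.
+ */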
+static int cam_lrme_hw_dev_util_cdm_acquire(struct cam_lrme_core *lrme_core,
+	struct cam_hw_info *lrme_hw)
+{
+	int rc, i;
+	struct cam_cdm_bl_request *cdm_cmd;
+	struct cam_cdm_acquire_data cdm_acquire;
+	struct cam_lrme_cdm_info *hw_cdm_info;
+
+	hw_cdm_info = kzalloc(sizeof(struct cam_lrme_cdm_info),
+		GFP_KERNEL);
+	if (!hw_cdm_info) {
+		CAM_ERR(CAM_LRME, "No memory for hw_cdm_info");
+		return -ENOMEM;
+	}
+
+	cdm_cmd = kzalloc((sizeof(struct cam_cdm_bl_request) +
+		((CAM_LRME_MAX_HW_ENTRIES - 1) *
+		sizeof(struct cam_cdm_bl_cmd))), GFP_KERNEL);
+	if (!cdm_cmd) {
+		CAM_ERR(CAM_LRME, "No memory for cdm_cmd");
+		kfree(hw_cdm_info);
+		return -ENOMEM;
+	}
+
+	memset(&cdm_acquire, 0, sizeof(cdm_acquire));
+	strlcpy(cdm_acquire.identifier, "lrmecdm", sizeof("lrmecdm"));
+	cdm_acquire.cell_index = lrme_hw->soc_info.index;
+	cdm_acquire.handle = 0;
+	cdm_acquire.userdata = hw_cdm_info;
+	cdm_acquire.cam_cdm_callback = NULL;
+	cdm_acquire.id = CAM_CDM_VIRTUAL;
+	cdm_acquire.base_array_cnt = lrme_hw->soc_info.num_reg_map;
+	for (i = 0; i < lrme_hw->soc_info.num_reg_map; i++)
+		cdm_acquire.base_array[i] = &lrme_hw->soc_info.reg_map[i];
+
+	rc = cam_cdm_acquire(&cdm_acquire);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Can't acquire cdm");
+		goto error;
+	}
+
+	hw_cdm_info->cdm_cmd = cdm_cmd;
+	hw_cdm_info->cdm_ops = cdm_acquire.ops;
+	hw_cdm_info->cdm_handle = cdm_acquire.handle;
+
+	lrme_core->hw_cdm_info = hw_cdm_info;
+	CAM_DBG(CAM_LRME, "cdm acquire done");
+
+	return 0;
+error:
+	kfree(cdm_cmd);
+	kfree(hw_cdm_info);
+	return rc;
+}
+
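+/*
+ * Probe: set up the workqueue and soc resources, acquire the CDM and an
+ * smmu handle, bring the core up once to read its capability registers
+ * (start, get_caps, stop), then populate the cam_hw_intf ops table.
+ */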
+static int cam_lrme_hw_dev_probe(struct platform_device *pdev)
+{
+	struct cam_hw_info *lrme_hw;
+	struct cam_hw_intf lrme_hw_intf;
+	struct cam_lrme_core *lrme_core;
+	const struct of_device_id *match_dev = NULL;
+	struct cam_lrme_hw_info *hw_info;
+	int rc, i;
+
+	lrme_hw = kzalloc(sizeof(struct cam_hw_info), GFP_KERNEL);
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "No memory to create lrme_hw");
+		return -ENOMEM;
+	}
+
+	lrme_core = kzalloc(sizeof(struct cam_lrme_core), GFP_KERNEL);
+	if (!lrme_core) {
+		CAM_ERR(CAM_LRME, "No memory to create lrme_core");
+		kfree(lrme_hw);
+		return -ENOMEM;
+	}
+
+	lrme_hw->core_info = lrme_core;
+	lrme_hw->hw_state = CAM_HW_STATE_POWER_DOWN;
+	lrme_hw->soc_info.pdev = pdev;
+	lrme_hw->soc_info.dev = &pdev->dev;
+	lrme_hw->soc_info.dev_name = pdev->name;
+	lrme_hw->open_count = 0;
+	lrme_core->state = CAM_LRME_CORE_STATE_INIT;
+
+	mutex_init(&lrme_hw->hw_mutex);
+	spin_lock_init(&lrme_hw->hw_lock);
+	init_completion(&lrme_hw->hw_complete);
+	init_completion(&lrme_core->reset_complete);
+
+	rc = cam_req_mgr_workq_create("cam_lrme_hw_worker",
+		CAM_LRME_HW_WORKQ_NUM_TASK,
+		&lrme_core->work, CRM_WORKQ_USAGE_IRQ, 0);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Unable to create a workq, rc=%d", rc);
+		goto free_memory;
+	}
+
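+	/* Give each workq task its own payload so IRQ work carries its data */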
+	for (i = 0; i < CAM_LRME_HW_WORKQ_NUM_TASK; i++)
+		lrme_core->work->task.pool[i].payload =
+			&lrme_core->work_data[i];
+
+	match_dev = of_match_device(pdev->dev.driver->of_match_table,
+		&pdev->dev);
+	if (!match_dev || !match_dev->data) {
+		CAM_ERR(CAM_LRME, "No Of_match data, %pK", match_dev);
+		rc = -EINVAL;
+		goto destroy_workqueue;
+	}
+	hw_info = (struct cam_lrme_hw_info *)match_dev->data;
+	lrme_core->hw_info = hw_info;
+
+	rc = cam_lrme_soc_init_resources(&lrme_hw->soc_info,
+		cam_lrme_hw_irq, lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to init soc, rc=%d", rc);
+		goto destroy_workqueue;
+	}
+
+	rc = cam_lrme_hw_dev_util_cdm_acquire(lrme_core, lrme_hw);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire cdm");
+		goto deinit_platform_res;
+	}
+
+	rc = cam_smmu_get_handle("lrme", &lrme_core->device_iommu.non_secure);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Get iommu handle failed");
+		goto release_cdm;
+	}
+
+	rc = cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_ATTACH);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "LRME attach iommu handle failed, rc=%d", rc);
+		goto destroy_smmu;
+	}
+
+	rc = cam_lrme_hw_start(lrme_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to hw init, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_hw_util_get_caps(lrme_hw, &lrme_core->hw_caps);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to get hw caps, rc=%d", rc);
+		if (cam_lrme_hw_stop(lrme_hw, NULL, 0))
+			CAM_ERR(CAM_LRME, "Failed in hw deinit");
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_hw_stop(lrme_hw, NULL, 0);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to deinit hw, rc=%d", rc);
+		goto detach_smmu;
+	}
+
+	lrme_core->hw_idx = lrme_hw->soc_info.index;
+	lrme_hw_intf.hw_priv = lrme_hw;
+	lrme_hw_intf.hw_idx = lrme_hw->soc_info.index;
+	lrme_hw_intf.hw_ops.get_hw_caps = cam_lrme_hw_get_caps;
+	lrme_hw_intf.hw_ops.init = NULL;
+	lrme_hw_intf.hw_ops.deinit = NULL;
+	lrme_hw_intf.hw_ops.reset = cam_lrme_hw_reset;
+	lrme_hw_intf.hw_ops.reserve = NULL;
+	lrme_hw_intf.hw_ops.release = NULL;
+	lrme_hw_intf.hw_ops.start = cam_lrme_hw_start;
+	lrme_hw_intf.hw_ops.stop = cam_lrme_hw_stop;
+	lrme_hw_intf.hw_ops.read = NULL;
+	lrme_hw_intf.hw_ops.write = NULL;
+	lrme_hw_intf.hw_ops.process_cmd = cam_lrme_hw_process_cmd;
+	lrme_hw_intf.hw_ops.flush = cam_lrme_hw_flush;
+	lrme_hw_intf.hw_type = CAM_HW_LRME;
+
+	rc = cam_cdm_get_iommu_handle("lrmecdm", &lrme_core->cdm_iommu);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to acquire the CDM iommu handles");
+		goto detach_smmu;
+	}
+
+	rc = cam_lrme_mgr_register_device(&lrme_hw_intf,
+		&lrme_core->device_iommu,
+		&lrme_core->cdm_iommu);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to register device");
+		goto detach_smmu;
+	}
+
+	platform_set_drvdata(pdev, lrme_hw);
+	CAM_DBG(CAM_LRME, "LRME-%d probe successful", lrme_hw_intf.hw_idx);
+
+	return rc;
+
+detach_smmu:
+	cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+destroy_smmu:
+	cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+release_cdm:
+	cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+	kfree(lrme_core->hw_cdm_info->cdm_cmd);
+	kfree(lrme_core->hw_cdm_info);
+deinit_platform_res:
+	if (cam_lrme_soc_deinit_resources(&lrme_hw->soc_info))
+		CAM_ERR(CAM_LRME, "Failed in soc deinit");
+	/* hw_mutex is destroyed once, in the shared free_memory path below */
+destroy_workqueue:
+	cam_req_mgr_workq_destroy(&lrme_core->work);
+free_memory:
+	mutex_destroy(&lrme_hw->hw_mutex);
+	kfree(lrme_hw);
+	kfree(lrme_core);
+
+	return rc;
+}
+
+static int cam_lrme_hw_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct cam_hw_info *lrme_hw;
+	struct cam_lrme_core *lrme_core;
+
+	lrme_hw = platform_get_drvdata(pdev);
+	if (!lrme_hw) {
+		CAM_ERR(CAM_LRME, "Invalid lrme_hw from platform device");
+		return -ENODEV;
+	}
+
+	lrme_core = (struct cam_lrme_core *)lrme_hw->core_info;
+	if (!lrme_core) {
+		CAM_ERR(CAM_LRME, "Invalid lrme_core from lrme_hw");
+		rc = -EINVAL;
+		goto deinit_platform_res;
+	}
+
+	cam_smmu_ops(lrme_core->device_iommu.non_secure, CAM_SMMU_DETACH);
+	cam_smmu_destroy_handle(lrme_core->device_iommu.non_secure);
+	cam_cdm_release(lrme_core->hw_cdm_info->cdm_handle);
+	cam_lrme_mgr_deregister_device(lrme_core->hw_idx);
+
+	kfree(lrme_core->hw_cdm_info->cdm_cmd);
+	kfree(lrme_core->hw_cdm_info);
+	kfree(lrme_core);
+
+deinit_platform_res:
+	rc = cam_lrme_soc_deinit_resources(&lrme_hw->soc_info);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Error in LRME soc deinit, rc=%d", rc);
+
+	mutex_destroy(&lrme_hw->hw_mutex);
+	kfree(lrme_hw);
+
+	return rc;
+}
+
+static const struct of_device_id cam_lrme_hw_dt_match[] = {
+	{
+		.compatible = "qcom,lrme",
+		.data = &cam_lrme10_hw_info,
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_lrme_hw_dt_match);
+
+static struct platform_driver cam_lrme_hw_driver = {
+	.probe = cam_lrme_hw_dev_probe,
+	.remove = cam_lrme_hw_dev_remove,
+	.driver = {
+		.name = "cam_lrme_hw",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_lrme_hw_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_lrme_hw_init_module(void)
+{
+	return platform_driver_register(&cam_lrme_hw_driver);
+}
+
+static void __exit cam_lrme_hw_exit_module(void)
+{
+	platform_driver_unregister(&cam_lrme_hw_driver);
+}
+
+module_init(cam_lrme_hw_init_module);
+module_exit(cam_lrme_hw_exit_module);
+MODULE_DESCRIPTION("CAM LRME HW driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
new file mode 100644
index 0000000..a1c5f4d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_intf.h
@@ -0,0 +1,195 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_LRME_HW_INTF_H_
+#define _CAM_LRME_HW_INTF_H_
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <media/cam_cpas.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_lrme.h>
+
+#include "cam_io_util.h"
+#include "cam_soc_util.h"
+#include "cam_hw.h"
+#include "cam_hw_intf.h"
+#include "cam_subdev.h"
+#include "cam_cpas_api.h"
+#include "cam_hw_mgr_intf.h"
+#include "cam_debug_util.h"
+
+
+#define CAM_LRME_MAX_IO_BUFFER 2
+#define CAM_LRME_MAX_HW_ENTRIES 5
+
+#define CAM_LRME_BASE_IDX 0
+
+/**
+ * enum cam_lrme_hw_type : Enum for LRME HW type
+ *
+ * @CAM_HW_LRME : LRME HW type
+ */
+enum cam_lrme_hw_type {
+	CAM_HW_LRME,
+};
+
+/**
+ * enum cam_lrme_cb_type : HW manager call back type
+ *
+ * @CAM_LRME_CB_BUF_DONE        : Indicate buf done has been generated
+ * @CAM_LRME_CB_COMP_REG_UPDATE : Indicate receiving WE comp reg update
+ * @CAM_LRME_CB_PUT_FRAME       : Request HW manager to put back the frame
+ * @CAM_LRME_CB_ERROR           : Indicate error irq has been generated
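+ *
+ * These values are bit flags, so a single callback invocation may report
+ * several events ORed together.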
+ */
+enum cam_lrme_cb_type {
+	CAM_LRME_CB_BUF_DONE = 1 << 0,
+	CAM_LRME_CB_COMP_REG_UPDATE = 1 << 1,
+	CAM_LRME_CB_PUT_FRAME = 1 << 2,
+	CAM_LRME_CB_ERROR = 1 << 3,
+};
+
+/**
+ * enum cam_lrme_hw_cmd_type : HW CMD type
+ *
+ * @CAM_LRME_HW_CMD_PREPARE_HW_UPDATE : Prepare HW update
+ * @CAM_LRME_HW_CMD_REGISTER_CB       : Register HW manager callback
+ * @CAM_LRME_HW_CMD_SUBMIT            : Submit frame to HW
+ * @CAM_LRME_HW_CMD_DUMP_REGISTER     : Dump register values
+ */
+enum cam_lrme_hw_cmd_type {
+	CAM_LRME_HW_CMD_PREPARE_HW_UPDATE,
+	CAM_LRME_HW_CMD_REGISTER_CB,
+	CAM_LRME_HW_CMD_SUBMIT,
+	CAM_LRME_HW_CMD_DUMP_REGISTER,
+};
+
+/**
+ * enum cam_lrme_hw_reset_type : Type of reset
+ *
+ * @CAM_LRME_HW_RESET_TYPE_HW_RESET : HW reset
+ * @CAM_LRME_HW_RESET_TYPE_SW_RESET : SW reset
+ */
+enum cam_lrme_hw_reset_type {
+	CAM_LRME_HW_RESET_TYPE_HW_RESET,
+	CAM_LRME_HW_RESET_TYPE_SW_RESET,
+};
+
+/**
+ * struct cam_lrme_frame_request : LRME frame request
+ *
+ * @frame_list            : List head
+ * @req_id                : Request ID
+ * @ctxt_to_hw_map        : Information about context id, priority and device id
+ * @hw_device             : Pointer to HW device
+ * @hw_update_entries     : List of hw_update_entries
+ * @num_hw_update_entries : Number of hw_update_entries
+ */
+struct cam_lrme_frame_request {
+	struct list_head           frame_list;
+	uint64_t                   req_id;
+	void                      *ctxt_to_hw_map;
+	struct cam_lrme_device    *hw_device;
+	struct cam_hw_update_entry hw_update_entries[CAM_LRME_MAX_HW_ENTRIES];
+	uint32_t                   num_hw_update_entries;
+};
+
+/**
+ * struct cam_lrme_hw_io_buffer : IO buffer information
+ *
+ * @valid     : Indicate whether this IO config is valid
+ * @io_cfg    : Pointer to IO configuration
+ * @num_buf   : Number of buffers
+ * @num_plane : Number of planes
+ * @io_addr   : List of IO address
+ */
+struct cam_lrme_hw_io_buffer {
+	bool                   valid;
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t               num_buf;
+	uint32_t               num_plane;
+	uint64_t               io_addr[CAM_PACKET_MAX_PLANES];
+};
+
+/**
+ * struct cam_lrme_hw_cmd_config_args : Args for prepare HW update
+ *
+ * @hw_device       : Pointer to HW device
+ * @input_buf       : List of input buffers
+ * @output_buf      : List of output buffers
+ * @cmd_buf_addr    : Pointer to available KMD buffer
+ * @size            : Available KMD buffer size
+ * @config_buf_size : Size used to prepare update
+ */
+struct cam_lrme_hw_cmd_config_args {
+	struct cam_lrme_device *hw_device;
+	struct cam_lrme_hw_io_buffer input_buf[CAM_LRME_MAX_IO_BUFFER];
+	struct cam_lrme_hw_io_buffer output_buf[CAM_LRME_MAX_IO_BUFFER];
+	uint32_t *cmd_buf_addr;
+	uint32_t size;
+	uint32_t config_buf_size;
+};
+
+/**
+ * struct cam_lrme_hw_flush_args : Args for flush HW
+ *
+ * @ctxt_to_hw_map : Identity of context
+ * @req_to_flush   : Pointer to the frame to flush, in
+ *                   case of a single frame flush
+ * @flush_type     : Flush type
+ */
+struct cam_lrme_hw_flush_args {
+	void                          *ctxt_to_hw_map;
+	struct cam_lrme_frame_request *req_to_flush;
+	uint32_t                       flush_type;
+};
+
+/**
+ * struct cam_lrme_hw_reset_args : Args for reset HW
+ *
+ * @reset_type : Enum cam_lrme_hw_reset_type
+ */
+struct cam_lrme_hw_reset_args {
+	uint32_t reset_type;
+};
+
+/**
+ * struct cam_lrme_hw_cb_args : HW manager callback args
+ *
+ * @cb_type   : Callback event type
+ * @frame_req : Pointer to the frame associated with the cb
+ */
+struct cam_lrme_hw_cb_args {
+	uint32_t                       cb_type;
+	struct cam_lrme_frame_request *frame_req;
+};
+
+/**
+ * struct cam_lrme_hw_cmd_set_cb : Args for set callback function
+ *
+ * @cam_lrme_hw_mgr_cb : Callback function pointer
+ * @data               : Data sent along with callback function
+ */
+struct cam_lrme_hw_cmd_set_cb {
+	int (*cam_lrme_hw_mgr_cb)(void *data,
+		struct cam_lrme_hw_cb_args *args);
+	void *data;
+};
+
+/**
+ * struct cam_lrme_hw_submit_args : Args for submit request
+ *
+ * @hw_update_entries     : List of hw update entries used to program registers
+ * @num_hw_update_entries : Number of hw update entries
+ * @frame_req             : Pointer to the frame request
+ */
+struct cam_lrme_hw_submit_args {
+	struct cam_hw_update_entry    *hw_update_entries;
+	uint32_t                       num_hw_update_entries;
+	struct cam_lrme_frame_request *frame_req;
+};
+
+#endif /* _CAM_LRME_HW_INTF_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
new file mode 100644
index 0000000..8bb539e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_reg.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_LRME_HW_REG_H_
+#define _CAM_LRME_HW_REG_H_
+
+#include "cam_lrme_hw_core.h"
+
+static struct cam_lrme_hw_info cam_lrme10_hw_info = {
+	.clc_reg = {
+		.clc_hw_version                 = 0x00000000,
+		.clc_hw_status                  = 0x00000004,
+		.clc_hw_status_dbg              = 0x00000008,
+		.clc_module_cfg                 = 0x00000060,
+		.clc_moduleformat               = 0x000000A8,
+		.clc_rangestep                  = 0x00000068,
+		.clc_offset                     = 0x0000006C,
+		.clc_maxallowedsad              = 0x00000070,
+		.clc_minallowedtarmad           = 0x00000074,
+		.clc_meaningfulsaddiff          = 0x00000078,
+		.clc_minsaddiffdenom            = 0x0000007C,
+		.clc_robustnessmeasuredistmap_0 = 0x00000080,
+		.clc_robustnessmeasuredistmap_1 = 0x00000084,
+		.clc_robustnessmeasuredistmap_2 = 0x00000088,
+		.clc_robustnessmeasuredistmap_3 = 0x0000008C,
+		.clc_robustnessmeasuredistmap_4 = 0x00000090,
+		.clc_robustnessmeasuredistmap_5 = 0x00000094,
+		.clc_robustnessmeasuredistmap_6 = 0x00000098,
+		.clc_robustnessmeasuredistmap_7 = 0x0000009C,
+		.clc_ds_crop_horizontal         = 0x000000A0,
+		.clc_ds_crop_vertical           = 0x000000A4,
+		.clc_tar_pd_unpacker            = 0x000000AC,
+		.clc_ref_pd_unpacker            = 0x000000B0,
+		.clc_sw_override                = 0x000000B4,
+		.clc_tar_height                 = 0x000000B8,
+		.clc_ref_height                 = 0x000000BC,
+		.clc_test_bus_ctrl              = 0x000001F8,
+		.clc_spare                      = 0x000001FC,
+	},
+	.bus_rd_reg = {
+		.common_reg = {
+			.hw_version     = 0x00000200,
+			.hw_capability  = 0x00000204,
+			.sw_reset       = 0x00000208,
+			.cgc_override   = 0x0000020C,
+			.irq_mask       = 0x00000210,
+			.irq_clear      = 0x00000214,
+			.irq_cmd        = 0x00000218,
+			.irq_status     = 0x0000021C,
+			.cmd            = 0x00000220,
+			.irq_set        = 0x00000224,
+			.misr_reset     = 0x0000022C,
+			.security_cfg   = 0x00000230,
+			.pwr_iso_cfg    = 0x00000234,
+			.pwr_iso_seed   = 0x00000238,
+			.test_bus_ctrl  = 0x00000248,
+			.spare          = 0x0000024C,
+		},
+		.bus_client_reg = {
+			/* bus client 0 */
+			{
+				.core_cfg                = 0x00000250,
+				.ccif_meta_data          = 0x00000254,
+				.addr_image              = 0x00000258,
+				.rd_buffer_size          = 0x0000025C,
+				.rd_stride               = 0x00000260,
+				.unpack_cfg_0            = 0x00000264,
+				.latency_buff_allocation = 0x00000278,
+				.burst_limit_cfg         = 0x00000280,
+				.misr_cfg_0              = 0x00000284,
+				.misr_cfg_1              = 0x00000288,
+				.misr_rd_val             = 0x0000028C,
+				.debug_status_cfg        = 0x00000290,
+				.debug_status_0          = 0x00000294,
+				.debug_status_1          = 0x00000298,
+			},
+			/* bus client 1 */
+			{
+				.core_cfg                = 0x000002F0,
+				.ccif_meta_data          = 0x000002F4,
+				.addr_image              = 0x000002F8,
+				.rd_buffer_size          = 0x000002FC,
+				.rd_stride               = 0x00000300,
+				.unpack_cfg_0            = 0x00000304,
+				.latency_buff_allocation = 0x00000318,
+				.burst_limit_cfg         = 0x00000320,
+				.misr_cfg_0              = 0x00000324,
+				.misr_cfg_1              = 0x00000328,
+				.misr_rd_val             = 0x0000032C,
+				.debug_status_cfg        = 0x00000330,
+				.debug_status_0          = 0x00000334,
+				.debug_status_1          = 0x00000338,
+			},
+		},
+	},
+	.bus_wr_reg = {
+		.common_reg = {
+			.hw_version        = 0x00000500,
+			.hw_capability     = 0x00000504,
+			.sw_reset          = 0x00000508,
+			.cgc_override      = 0x0000050C,
+			.misr_reset        = 0x000005C8,
+			.pwr_iso_cfg       = 0x000005CC,
+			.test_bus_ctrl     = 0x0000061C,
+			.composite_mask_0  = 0x00000510,
+			.irq_mask_0        = 0x00000544,
+			.irq_mask_1        = 0x00000548,
+			.irq_clear_0       = 0x00000550,
+			.irq_clear_1       = 0x00000554,
+			.irq_status_0      = 0x0000055C,
+			.irq_status_1      = 0x00000560,
+			.irq_cmd           = 0x00000568,
+			.irq_set_0         = 0x000005BC,
+			.irq_set_1         = 0x000005C0,
+			.addr_fifo_status  = 0x000005A8,
+			.frame_header_cfg0 = 0x000005AC,
+			.frame_header_cfg1 = 0x000005B0,
+			.spare             = 0x00000620,
+		},
+		.bus_client_reg = {
+			/* bus client 0 */
+			{
+				.status_0                  = 0x00000700,
+				.status_1                  = 0x00000704,
+				.cfg                       = 0x00000708,
+				.addr_frame_header         = 0x0000070C,
+				.frame_header_cfg          = 0x00000710,
+				.addr_image                = 0x00000714,
+				.addr_image_offset         = 0x00000718,
+				.buffer_width_cfg          = 0x0000071C,
+				.buffer_height_cfg         = 0x00000720,
+				.packer_cfg                = 0x00000724,
+				.wr_stride                 = 0x00000728,
+				.irq_subsample_cfg_period  = 0x00000748,
+				.irq_subsample_cfg_pattern = 0x0000074C,
+				.burst_limit_cfg           = 0x0000075C,
+				.misr_cfg                  = 0x00000760,
+				.misr_rd_word_sel          = 0x00000764,
+				.misr_val                  = 0x00000768,
+				.debug_status_cfg          = 0x0000076C,
+				.debug_status_0            = 0x00000770,
+				.debug_status_1            = 0x00000774,
+			},
+			/* bus client 1 */
+			{
+				.status_0                  = 0x00000800,
+				.status_1                  = 0x00000804,
+				.cfg                       = 0x00000808,
+				.addr_frame_header         = 0x0000080C,
+				.frame_header_cfg          = 0x00000810,
+				.addr_image                = 0x00000814,
+				.addr_image_offset         = 0x00000818,
+				.buffer_width_cfg          = 0x0000081C,
+				.buffer_height_cfg         = 0x00000820,
+				.packer_cfg                = 0x00000824,
+				.wr_stride                 = 0x00000828,
+				.irq_subsample_cfg_period  = 0x00000848,
+				.irq_subsample_cfg_pattern = 0x0000084C,
+				.burst_limit_cfg           = 0x0000085C,
+				.misr_cfg                  = 0x00000860,
+				.misr_rd_word_sel          = 0x00000864,
+				.misr_val                  = 0x00000868,
+				.debug_status_cfg          = 0x0000086C,
+				.debug_status_0            = 0x00000870,
+				.debug_status_1            = 0x00000874,
+			},
+		},
+	},
+	.titan_reg = {
+		.top_hw_version        = 0x00000900,
+		.top_titan_version     = 0x00000904,
+		.top_rst_cmd           = 0x00000908,
+		.top_core_clk_cfg      = 0x00000920,
+		.top_irq_status        = 0x0000090C,
+		.top_irq_mask          = 0x00000910,
+		.top_irq_clear         = 0x00000914,
+		.top_irq_set           = 0x00000918,
+		.top_irq_cmd           = 0x0000091C,
+		.top_violation_status  = 0x00000924,
+		.top_spare             = 0x000009FC,
+	},
+};
+
+#endif /* _CAM_LRME_HW_REG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
new file mode 100644
index 0000000..5f23827
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "cam_lrme_hw_core.h"
+#include "cam_lrme_hw_soc.h"
+
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_soc_private *soc_private =
+		(struct cam_lrme_soc_private *)soc_info->soc_private;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	int rc = 0;
+
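+	/* Vote the default SVS AHB level and a nominal AXI bandwidth */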
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = 7200000;
+	axi_vote.uncompressed_bw = 7200000;
+	rc = cam_cpas_start(soc_private->cpas_handle, &ahb_vote, &axi_vote);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to start cpas, rc %d", rc);
+		return -EFAULT;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE,
+		true);
+	if (rc) {
+		CAM_ERR(CAM_LRME,
+			"Failed to enable platform resource, rc %d", rc);
+		goto stop_cpas;
+	}
+
+	cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_ENABLE);
+
+	return rc;
+
+stop_cpas:
+	if (cam_cpas_stop(soc_private->cpas_handle))
+		CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+	return rc;
+}
+
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw)
+{
+	struct cam_hw_soc_info *soc_info = &lrme_hw->soc_info;
+	struct cam_lrme_soc_private *soc_private;
+	int rc = 0;
+
+	soc_private = soc_info->soc_private;
+
+	cam_lrme_set_irq(lrme_hw, CAM_LRME_IRQ_DISABLE);
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed to disable platform resource");
+		return rc;
+	}
+	rc = cam_cpas_stop(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Failed to stop cpas");
+
+	return rc;
+}
+
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data)
+{
+	struct cam_lrme_soc_private *soc_private;
+	struct cam_cpas_register_params cpas_register_param;
+	int rc;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in get_dt_properties, rc=%d", rc);
+		return rc;
+	}
+
+	rc = cam_soc_util_request_platform_resource(soc_info, irq_handler,
+		private_data);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "Failed in request_platform_resource rc=%d",
+			rc);
+		return rc;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_lrme_soc_private), GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto release_res;
+	}
+	soc_info->soc_private = soc_private;
+
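+	/* Register with CPAS so this device can vote for AHB/AXI bandwidth */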
+	memset(&cpas_register_param, 0, sizeof(cpas_register_param));
+	strlcpy(cpas_register_param.identifier,
+		"lrmecpas", CAM_HW_IDENTIFIER_LENGTH);
+	cpas_register_param.cell_index = soc_info->index;
+	cpas_register_param.dev = &soc_info->pdev->dev;
+	cpas_register_param.userdata = private_data;
+	cpas_register_param.cam_cpas_client_cb = NULL;
+
+	rc = cam_cpas_register_client(&cpas_register_param);
+	if (rc) {
+		CAM_ERR(CAM_LRME, "CPAS registration failed");
+		goto free_soc_private;
+	}
+	soc_private->cpas_handle = cpas_register_param.client_handle;
+	CAM_DBG(CAM_LRME, "CPAS handle=%d", soc_private->cpas_handle);
+
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+release_res:
+	cam_soc_util_release_platform_resource(soc_info);
+
+	return rc;
+}
+
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info)
+{
+	struct cam_lrme_soc_private *soc_private =
+		(struct cam_lrme_soc_private *)soc_info->soc_private;
+	int rc;
+
+	rc = cam_cpas_unregister_client(soc_private->cpas_handle);
+	if (rc)
+		CAM_ERR(CAM_LRME, "Unregister cpas failed, handle=%d, rc=%d",
+			soc_private->cpas_handle, rc);
+
+	rc = cam_soc_util_release_platform_resource(soc_info);
+	if (rc)
+		CAM_ERR(CAM_LRME, "release platform failed, rc=%d", rc);
+
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
new file mode 100644
index 0000000..a37e435
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_lrme/lrme_hw_mgr/lrme_hw/cam_lrme_hw_soc.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_LRME_HW_SOC_H_
+#define _CAM_LRME_HW_SOC_H_
+
+#include "cam_soc_util.h"
+
+struct cam_lrme_soc_private {
+	uint32_t cpas_handle;
+};
+
+int cam_lrme_soc_enable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_disable_resources(struct cam_hw_info *lrme_hw);
+int cam_lrme_soc_init_resources(struct cam_hw_soc_info *soc_info,
+	irq_handler_t irq_handler, void *private_data);
+int cam_lrme_soc_deinit_resources(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_LRME_HW_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/Makefile b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
new file mode 100644
index 0000000..44cae4f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_req_mgr_core.o\
+				cam_req_mgr_dev.o \
+				cam_req_mgr_util.o \
+				cam_req_mgr_workq.o \
+				cam_mem_mgr.o \
+				cam_req_mgr_timer.o \
+				cam_req_mgr_debug.o
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
new file mode 100644
index 0000000..ffbaef8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.c
@@ -0,0 +1,1342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <linux/ion_kernel.h>
+#include <linux/dma-buf.h>
+
+#include "cam_req_mgr_util.h"
+#include "cam_mem_mgr.h"
+#include "cam_smmu_api.h"
+#include "cam_debug_util.h"
+
+static struct cam_mem_table tbl;
+
+static int cam_mem_util_map_cpu_va(struct dma_buf *dmabuf,
+	uintptr_t *vaddr,
+	size_t *len)
+{
+	int i, j, rc;
+	void *addr;
+
+	/*
+	 * dma_buf_begin_cpu_access() and dma_buf_end_cpu_access()
+	 * need to be called in pair to avoid stability issue.
+	 */
+	rc = dma_buf_begin_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "dma begin access failed rc=%d", rc);
+		return rc;
+	}
+
+	/*
+	 * Code could be simplified if ION supported dma_buf_vmap. This
+	 * workaround takes advantage of the fact that ion_alloc returns a
+	 * virtually contiguous memory region, so we only need to _kmap each
+	 * individual page and then use the virtual address returned from the
+	 * first call to _kmap.
+	 */
+	for (i = 0; i < PAGE_ALIGN(dmabuf->size) / PAGE_SIZE; i++) {
+		addr = dma_buf_kmap(dmabuf, i);
+		if (IS_ERR_OR_NULL(addr)) {
+			CAM_ERR(CAM_MEM, "kernel map fail");
+			for (j = 0; j < i; j++)
+				dma_buf_kunmap(dmabuf,
+					j,
+					(void *)(*vaddr + (j * PAGE_SIZE)));
+			*vaddr = 0;
+			*len = 0;
+			rc = -ENOSPC;
+			goto fail;
+		}
+		if (i == 0)
+			*vaddr = (uintptr_t)addr;
+	}
+
+	*len = dmabuf->size;
+
+	return 0;
+
+fail:
+	dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+	return rc;
+}
+
+static int cam_mem_util_unmap_cpu_va(struct dma_buf *dmabuf,
+	uint64_t vaddr)
+{
+	int i, rc = 0, page_num;
+
+	if (!dmabuf || !vaddr) {
+		CAM_ERR(CAM_MEM, "Invalid input args %pK %llX", dmabuf, vaddr);
+		return -EINVAL;
+	}
+
+	page_num = PAGE_ALIGN(dmabuf->size) / PAGE_SIZE;
+
+	for (i = 0; i < page_num; i++) {
+		dma_buf_kunmap(dmabuf, i,
+			(void *)(vaddr + (i * PAGE_SIZE)));
+	}
+
+	/*
+	 * dma_buf_begin_cpu_access() and
+	 * dma_buf_end_cpu_access() need to be called in pair
+	 * to avoid stability issue.
+	 */
+	rc = dma_buf_end_cpu_access(dmabuf, DMA_BIDIRECTIONAL);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Failed in end cpu access, dmabuf=%pK",
+			dmabuf);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int cam_mem_util_get_dma_dir(uint32_t flags)
+{
+	int rc = -EINVAL;
+
+	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
+		rc = DMA_TO_DEVICE;
+	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
+		rc = DMA_FROM_DEVICE;
+	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
+		rc = DMA_BIDIRECTIONAL;
+	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		rc = DMA_BIDIRECTIONAL;
+
+	return rc;
+}
+
+int cam_mem_mgr_init(void)
+{
+	int i;
+	int bitmap_size;
+
+	memset(tbl.bufq, 0, sizeof(tbl.bufq));
+
+	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
+	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!tbl.bitmap)
+		return -ENOMEM;
+
+	tbl.bits = bitmap_size * BITS_PER_BYTE;
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].buf_handle = -1;
+	}
+	mutex_init(&tbl.m_lock);
+
+	return 0;
+}
+
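+/* Reserve a free slot in the buffer table using the allocation bitmap */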
+static int32_t cam_mem_get_slot(void)
+{
+	int32_t idx;
+
+	mutex_lock(&tbl.m_lock);
+	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		mutex_unlock(&tbl.m_lock);
+		return -ENOMEM;
+	}
+
+	set_bit(idx, tbl.bitmap);
+	tbl.bufq[idx].active = true;
+	mutex_init(&tbl.bufq[idx].q_lock);
+	mutex_unlock(&tbl.m_lock);
+
+	return idx;
+}
+
+static void cam_mem_put_slot(int32_t idx)
+{
+	mutex_lock(&tbl.m_lock);
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].active = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	mutex_destroy(&tbl.bufq[idx].q_lock);
+	clear_bit(idx, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+}
+
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+	uint64_t *iova_ptr, size_t *len_ptr)
+{
+	int rc = 0, idx;
+
+	*len_ptr = 0;
+
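+	/* The buf handle encodes its table index; extract and bounds-check it */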
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	if (!tbl.bufq[idx].active)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto handle_mismatch;
+	}
+
+	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
+		rc = cam_smmu_get_stage2_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
+	else
+		rc = cam_smmu_get_iova(mmu_handle,
+			tbl.bufq[idx].fd,
+			iova_ptr,
+			len_ptr);
+	if (rc) {
+		CAM_ERR(CAM_MEM,
+			"fail to map buf_hdl:0x%x, mmu_hdl: 0x%x for fd:%d",
+			buf_handle, mmu_handle, tbl.bufq[idx].fd);
+		goto handle_mismatch;
+	}
+
+	CAM_DBG(CAM_MEM,
+		"handle:0x%x fd:%d iova_ptr:%pK len_ptr:%zu",
+		mmu_handle, tbl.bufq[idx].fd, iova_ptr, *len_ptr);
+handle_mismatch:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_io_buf);
+
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr, size_t *len)
+{
+	int rc = 0;
+	int idx;
+	struct dma_buf *dmabuf = NULL;
+	uintptr_t kvaddr = 0;
+	size_t klen = 0;
+
+	if (!buf_handle || !vaddr_ptr || !len)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	if (!tbl.bufq[idx].active)
+		return -EPERM;
+
+	if (buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
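+	/* Kernel mapping is allowed only for buffers allocated with KMD_ACCESS */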
+	if (!(tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)) {
+		rc = -EINVAL;
+		goto exit_func;
+	}
+
+	/*
+	 * Map lazily on first kernel access. Take the lock before the check
+	 * so concurrent callers cannot both map the buffer.
+	 */
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	if (!tbl.bufq[idx].kmdvaddr) {
+		dmabuf = tbl.bufq[idx].dma_buf;
+		if (!dmabuf) {
+			CAM_ERR(CAM_MEM, "Invalid DMA buffer pointer");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		rc = cam_mem_util_map_cpu_va(dmabuf,
+			&kvaddr, &klen);
+		if (rc)
+			goto release_mutex;
+
+		tbl.bufq[idx].kmdvaddr = kvaddr;
+	}
+
+	*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
+	*len = tbl.bufq[idx].len;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	return rc;
+
+release_mutex:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+exit_func:
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_get_cpu_buf);
+
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
+{
+	int rc = 0, idx;
+	uint32_t cache_dir;
+	unsigned long dmabuf_flag = 0;
+
+	if (!cmd)
+		return -EINVAL;
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
+		return -EINVAL;
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+
+	if (!tbl.bufq[idx].active) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
+		rc = -EINVAL;
+		goto end;
+	}
+
+	rc = dma_buf_get_flags(tbl.bufq[idx].dma_buf, &dmabuf_flag);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "cache get flags failed %d", rc);
+		goto end;
+	}
+
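+	/* Cache maintenance applies only to buffers allocated ION_FLAG_CACHED */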
+	if (dmabuf_flag & ION_FLAG_CACHED) {
+		switch (cmd->mem_cache_ops) {
+		case CAM_MEM_CLEAN_CACHE:
+			cache_dir = DMA_TO_DEVICE;
+			break;
+		case CAM_MEM_INV_CACHE:
+			cache_dir = DMA_FROM_DEVICE;
+			break;
+		case CAM_MEM_CLEAN_INV_CACHE:
+			cache_dir = DMA_BIDIRECTIONAL;
+			break;
+		default:
+			CAM_ERR(CAM_MEM,
+				"invalid cache ops :%d", cmd->mem_cache_ops);
+			rc = -EINVAL;
+			goto end;
+		}
+	} else {
+		CAM_DBG(CAM_MEM, "BUF is not cached");
+		goto end;
+	}
+
+	rc = dma_buf_begin_cpu_access(tbl.bufq[idx].dma_buf,
+		(cmd->mem_cache_ops == CAM_MEM_CLEAN_INV_CACHE) ?
+		DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "dma begin access failed rc=%d", rc);
+		goto end;
+	}
+
+	rc = dma_buf_end_cpu_access(tbl.bufq[idx].dma_buf,
+		cache_dir);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "dma end access failed rc=%d", rc);
+		goto end;
+	}
+
+end:
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
+
+static int cam_mem_util_get_dma_buf(size_t len,
+	unsigned int heap_id_mask,
+	unsigned int flags,
+	struct dma_buf **buf)
+{
+	int rc = 0;
+
+	if (!buf) {
+		CAM_ERR(CAM_MEM, "Invalid params");
+		return -EINVAL;
+	}
+
+	*buf = ion_alloc(len, heap_id_mask, flags);
+	if (IS_ERR_OR_NULL(*buf))
+		return -ENOMEM;
+
+	return rc;
+}
+
+static int cam_mem_util_get_dma_buf_fd(size_t len,
+	size_t align,
+	unsigned int heap_id_mask,
+	unsigned int flags,
+	struct dma_buf **buf,
+	int *fd)
+{
+	int rc = 0;
+
+	if (!buf || !fd) {
+		CAM_ERR(CAM_MEM, "Invalid params, buf=%pK, fd=%pK", buf, fd);
+		return -EINVAL;
+	}
+
+	*buf = ion_alloc(len, heap_id_mask, flags);
+	if (IS_ERR_OR_NULL(*buf))
+		return -ENOMEM;
+
+	*fd = dma_buf_fd(*buf, O_CLOEXEC);
+	if (*fd < 0) {
+		CAM_ERR(CAM_MEM, "get fd fail, *fd=%d", *fd);
+		rc = -EINVAL;
+		goto get_fd_fail;
+	}
+
+	/*
+	 * Increment the ref count so that it becomes 2 here. When the fd is
+	 * closed, the refcount drops to 1, and when dma_buf_put() is called,
+	 * it drops to 0 and the memory is freed.
+	 */
+	dma_buf_get(*fd);
+
+	return rc;
+
+get_fd_fail:
+	dma_buf_put(*buf);
+	return rc;
+}
+
+static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
+	struct dma_buf **dmabuf,
+	int *fd)
+{
+	uint32_t heap_id;
+	uint32_t ion_flag = 0;
+	int rc;
+
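+	/* Secure buffers come from the secure display heap, tagged for camera CP */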
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
+		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
+	} else {
+		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
+			ION_HEAP(ION_CAMERA_HEAP_ID);
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	rc = cam_mem_util_get_dma_buf_fd(cmd->len,
+		cmd->align,
+		heap_id,
+		ion_flag,
+		dmabuf,
+		fd);
+
+	return rc;
+}
+
+
+static int cam_mem_util_check_alloc_flags(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		CAM_ERR(CAM_MEM, "Num of mmu hdl exceeded maximum(%d)",
+			CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		CAM_ERR(CAM_MEM, "Kernel mapping in secure mode not allowed");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
+{
+	if (!cmd->flags) {
+		CAM_ERR(CAM_MEM, "Invalid flags");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
+			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
+		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		CAM_ERR(CAM_MEM,
+			"Kernel mapping in secure mode not allowed, flags=0x%x",
+			cmd->flags);
+		return -EINVAL;
+	}
+
+	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		CAM_ERR(CAM_MEM,
+			"Shared memory buffers are not allowed to be mapped");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int cam_mem_util_map_hw_va(uint32_t flags,
+	int32_t *mmu_hdls,
+	int32_t num_hdls,
+	int fd,
+	dma_addr_t *hw_vaddr,
+	size_t *len,
+	enum cam_smmu_region_id region)
+{
+	int i;
+	int rc = -1;
+	int dir = cam_mem_util_get_dma_dir(flags);
+
+	if (dir < 0) {
+		CAM_ERR(CAM_MEM, "fail to map DMA direction, dir=%d", dir);
+		return dir;
+	}
+
+	CAM_DBG(CAM_MEM,
+		"map_hw_va : fd = %d,  flags = 0x%x, dir=%d, num_hdls=%d",
+		fd, flags, dir, num_hdls);
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
+				fd,
+				dir,
+				hw_vaddr,
+				len);
+
+			if (rc < 0) {
+				CAM_ERR(CAM_MEM,
+					"Failed to securely map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, rc=%d",
+					i, fd, dir, mmu_hdls[i], rc);
+				goto multi_map_fail;
+			}
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_map_user_iova(mmu_hdls[i],
+				fd,
+				dir,
+				(dma_addr_t *)hw_vaddr,
+				len,
+				region);
+
+			if (rc < 0) {
+				CAM_ERR(CAM_MEM,
+					"Failed to map to smmu, i=%d, fd=%d, dir=%d, mmu_hdl=%d, region=%d, rc=%d",
+					i, fd, dir, mmu_hdls[i], region, rc);
+				goto multi_map_fail;
+			}
+		}
+	}
+
+	return rc;
+multi_map_fail:
+	/* Roll back every mapping created so far, including index 0 */
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
+	else
+		for (--i; i >= 0; i--)
+			cam_smmu_unmap_user_iova(mmu_hdls[i],
+				fd,
+				CAM_SMMU_REGION_IO);
+	return rc;
+}
+
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
+{
+	int rc;
+	int32_t idx;
+	struct dma_buf *dmabuf = NULL;
+	int fd = -1;
+	dma_addr_t hw_vaddr = 0;
+	size_t len;
+
+	if (!cmd) {
+		CAM_ERR(CAM_MEM, " Invalid argument");
+		return -EINVAL;
+	}
+	len = cmd->len;
+
+	rc = cam_mem_util_check_alloc_flags(cmd);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Invalid flags: flags = 0x%X, rc=%d",
+			cmd->flags, rc);
+		return rc;
+	}
+
+	rc = cam_mem_util_ion_alloc(cmd,
+		&dmabuf,
+		&fd);
+	if (rc) {
+		CAM_ERR(CAM_MEM,
+			"Ion Alloc failed, len=%llu, align=%llu, flags=0x%x, num_hdl=%d",
+			cmd->len, cmd->align, cmd->flags, cmd->num_hdl);
+		return rc;
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		CAM_ERR(CAM_MEM, "Failed in getting mem slot, idx=%d", idx);
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
+
+		/* Default to the IO region; SHARED takes precedence when set */
+		enum cam_smmu_region_id region = CAM_SMMU_REGION_IO;
+
+		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+			region = CAM_SMMU_REGION_SHARED;
+
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			fd,
+			&hw_vaddr,
+			&len,
+			region);
+
+		if (rc) {
+			CAM_ERR(CAM_MEM,
+				"Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
+				cmd->flags, fd, region, cmd->num_hdl, rc);
+			goto map_hw_fail;
+		}
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = fd;
+	tbl.bufq[idx].dma_buf = NULL;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].dma_buf = dmabuf;
+	tbl.bufq[idx].len = cmd->len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.fd = tbl.bufq[idx].fd;
+	cmd->out.vaddr = 0;
+
+	CAM_DBG(CAM_MEM,
+		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
+		cmd->out.fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
+		tbl.bufq[idx].len);
+
+	return rc;
+
+map_hw_fail:
+	cam_mem_put_slot(idx);
+slot_fail:
+	dma_buf_put(dmabuf);
+	return rc;
+}
+
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
+{
+	int32_t idx;
+	int rc;
+	struct dma_buf *dmabuf;
+	dma_addr_t hw_vaddr = 0;
+	size_t len = 0;
+
+	if (!cmd || (cmd->fd < 0)) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
+		CAM_ERR(CAM_MEM, "Num of mmu hdl %d exceeded maximum(%d)",
+			cmd->num_hdl, CAM_MEM_MMU_MAX_HANDLE);
+		return -EINVAL;
+	}
+
+	rc = cam_mem_util_check_map_flags(cmd);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Invalid flags: flags = %X", cmd->flags);
+		return rc;
+	}
+
+	dmabuf = dma_buf_get(cmd->fd);
+	if (IS_ERR_OR_NULL(dmabuf)) {
+		CAM_ERR(CAM_MEM, "Failed to import dma_buf fd");
+		return -EINVAL;
+	}
+
+	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
+		rc = cam_mem_util_map_hw_va(cmd->flags,
+			cmd->mmu_hdls,
+			cmd->num_hdl,
+			cmd->fd,
+			&hw_vaddr,
+			&len,
+			CAM_SMMU_REGION_IO);
+		if (rc) {
+			CAM_ERR(CAM_MEM,
+				"Failed in map_hw_va, flags=0x%x, fd=%d, region=%d, num_hdl=%d, rc=%d",
+				cmd->flags, cmd->fd, CAM_SMMU_REGION_IO,
+				cmd->num_hdl, rc);
+			goto map_fail;
+		}
+	}
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto map_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].fd = cmd->fd;
+	tbl.bufq[idx].dma_buf = NULL;
+	tbl.bufq[idx].flags = cmd->flags;
+	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
+	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
+		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	if (cmd->num_hdl > 0)
+		tbl.bufq[idx].vaddr = hw_vaddr;
+	else
+		tbl.bufq[idx].vaddr = 0;
+
+	tbl.bufq[idx].dma_buf = dmabuf;
+	tbl.bufq[idx].len = len;
+	tbl.bufq[idx].num_hdl = cmd->num_hdl;
+	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
+		sizeof(int32_t) * cmd->num_hdl);
+	tbl.bufq[idx].is_imported = true;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
+	cmd->out.vaddr = 0;
+
+	CAM_DBG(CAM_MEM,
+		"fd=%d, flags=0x%x, num_hdl=%d, idx=%d, buf handle=%x, len=%zu",
+		cmd->fd, cmd->flags, cmd->num_hdl, idx, cmd->out.buf_handle,
+		tbl.bufq[idx].len);
+
+	return rc;
+
+map_fail:
+	dma_buf_put(dmabuf);
+	return rc;
+}
+
+static int cam_mem_util_unmap_hw_va(int32_t idx,
+	enum cam_smmu_region_id region,
+	enum cam_smmu_mapping_client client)
+{
+	int i;
+	uint32_t flags;
+	int32_t *mmu_hdls;
+	int num_hdls;
+	int fd;
+	int rc = 0;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index");
+		return -EINVAL;
+	}
+
+	flags = tbl.bufq[idx].flags;
+	mmu_hdls = tbl.bufq[idx].hdls;
+	num_hdls = tbl.bufq[idx].num_hdl;
+	fd = tbl.bufq[idx].fd;
+
+	CAM_DBG(CAM_MEM,
+		"unmap_hw_va : idx=%d, fd=%x, flags=0x%x, num_hdls=%d, client=%d",
+		idx, fd, flags, num_hdls, client);
+
+	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
+		for (i = 0; i < num_hdls; i++) {
+			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
+			if (rc < 0) {
+				CAM_ERR(CAM_MEM,
+					"Failed in secure unmap, i=%d, fd=%d, mmu_hdl=%d, rc=%d",
+					i, fd, mmu_hdls[i], rc);
+				goto unmap_end;
+			}
+		}
+	} else {
+		for (i = 0; i < num_hdls; i++) {
+			if (client == CAM_SMMU_MAPPING_USER) {
+				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
+					fd, region);
+			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
+				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
+					tbl.bufq[idx].dma_buf, region);
+			} else {
+				CAM_ERR(CAM_MEM,
+					"invalid caller for unmapping : %d",
+					client);
+				rc = -EINVAL;
+			}
+			if (rc < 0) {
+				CAM_ERR(CAM_MEM,
+					"Failed in unmap, i=%d, fd=%d, mmu_hdl=%d, region=%d, rc=%d",
+					i, fd, mmu_hdls[i], region, rc);
+				goto unmap_end;
+			}
+		}
+	}
+
+	return rc;
+
+unmap_end:
+	CAM_ERR(CAM_MEM, "unmapping failed");
+	return rc;
+}
+
+static void cam_mem_mgr_unmap_active_buf(int idx)
+{
+	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
+		region = CAM_SMMU_REGION_SHARED;
+	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+		region = CAM_SMMU_REGION_IO;
+
+	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
+}
+
+static int cam_mem_mgr_cleanup_table(void)
+{
+	int i;
+
+	mutex_lock(&tbl.m_lock);
+	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
+		if (!tbl.bufq[i].active) {
+			CAM_DBG(CAM_MEM,
+				"Buffer inactive at idx=%d, continuing", i);
+			continue;
+		} else {
+			CAM_DBG(CAM_MEM,
+			"Active buffer at idx=%d, possible leak needs unmapping",
+			i);
+			cam_mem_mgr_unmap_active_buf(i);
+		}
+
+		mutex_lock(&tbl.bufq[i].q_lock);
+		if (tbl.bufq[i].dma_buf) {
+			dma_buf_put(tbl.bufq[i].dma_buf);
+			tbl.bufq[i].dma_buf = NULL;
+		}
+		tbl.bufq[i].fd = -1;
+		tbl.bufq[i].flags = 0;
+		tbl.bufq[i].buf_handle = -1;
+		tbl.bufq[i].vaddr = 0;
+		tbl.bufq[i].len = 0;
+		memset(tbl.bufq[i].hdls, 0,
+			sizeof(int32_t) * tbl.bufq[i].num_hdl);
+		tbl.bufq[i].num_hdl = 0;
+		tbl.bufq[i].dma_buf = NULL;
+		tbl.bufq[i].active = false;
+		mutex_unlock(&tbl.bufq[i].q_lock);
+		mutex_destroy(&tbl.bufq[i].q_lock);
+	}
+
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	/* We need to reserve slot 0 because 0 is invalid */
+	set_bit(0, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+
+	return 0;
+}
+
+void cam_mem_mgr_deinit(void)
+{
+	cam_mem_mgr_cleanup_table();
+	mutex_lock(&tbl.m_lock);
+	bitmap_zero(tbl.bitmap, tbl.bits);
+	kfree(tbl.bitmap);
+	tbl.bitmap = NULL;
+	mutex_unlock(&tbl.m_lock);
+	mutex_destroy(&tbl.m_lock);
+}
+
+static int cam_mem_util_unmap(int32_t idx,
+	enum cam_smmu_mapping_client client)
+{
+	int rc = 0;
+	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
+
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_MEM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);
+
+	mutex_lock(&tbl.m_lock);
+	if ((!tbl.bufq[idx].active) &&
+		(tbl.bufq[idx].vaddr) == 0) {
+		CAM_WARN(CAM_MEM, "Buffer at idx=%d is already unmapped",
+			idx);
+		mutex_unlock(&tbl.m_lock);
+		return 0;
+	}
+
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
+		if (tbl.bufq[idx].dma_buf && tbl.bufq[idx].kmdvaddr) {
+			rc = cam_mem_util_unmap_cpu_va(tbl.bufq[idx].dma_buf,
+				tbl.bufq[idx].kmdvaddr);
+			if (rc)
+				CAM_ERR(CAM_MEM,
+					"Failed, dmabuf=%pK, kmdvaddr=%pK",
+					tbl.bufq[idx].dma_buf,
+					tbl.bufq[idx].kmdvaddr);
+		}
+	}
+
+	/* SHARED flag gets precedence, all other flags after it */
+	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		region = CAM_SMMU_REGION_SHARED;
+	} else {
+		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
+	}
+
+	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
+		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
+		if (cam_mem_util_unmap_hw_va(idx, region, client))
+			CAM_ERR(CAM_MEM, "Failed, dmabuf=%pK",
+				tbl.bufq[idx].dma_buf);
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	tbl.bufq[idx].flags = 0;
+	tbl.bufq[idx].buf_handle = -1;
+	tbl.bufq[idx].vaddr = 0;
+	memset(tbl.bufq[idx].hdls, 0,
+		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);
+
+	CAM_DBG(CAM_MEM,
+		"Ion buf at idx = %d freeing fd = %d, imported %d, dma_buf %pK",
+		idx, tbl.bufq[idx].fd,
+		tbl.bufq[idx].is_imported,
+		tbl.bufq[idx].dma_buf);
+
+	if (tbl.bufq[idx].dma_buf) {
+		dma_buf_put(tbl.bufq[idx].dma_buf);
+		tbl.bufq[idx].dma_buf = NULL;
+	}
+
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].dma_buf = NULL;
+	tbl.bufq[idx].is_imported = false;
+	tbl.bufq[idx].len = 0;
+	tbl.bufq[idx].num_hdl = 0;
+	tbl.bufq[idx].active = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+	mutex_destroy(&tbl.bufq[idx].q_lock);
+	clear_bit(idx, tbl.bitmap);
+	mutex_unlock(&tbl.m_lock);
+
+	return rc;
+}
+
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
+{
+	int idx;
+	int rc;
+
+	if (!cmd) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index %d extracted from mem handle",
+			idx);
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		CAM_ERR(CAM_MEM, "Released buffer state should be active");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
+		CAM_ERR(CAM_MEM,
+			"Released buf handle %d does not match table entry %d, idx=%d",
+			cmd->buf_handle, tbl.bufq[idx].buf_handle, idx);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_MEM, "Releasing hdl = %x, idx = %d", cmd->buf_handle, idx);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);
+
+	return rc;
+}
+
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+	struct cam_mem_mgr_memory_desc *out)
+{
+	struct dma_buf *buf = NULL;
+	int ion_fd = -1;
+	int rc = 0;
+	uint32_t heap_id;
+	int32_t ion_flag = 0;
+	uintptr_t kvaddr;
+	dma_addr_t iova = 0;
+	size_t request_len = 0;
+	uint32_t mem_handle;
+	int32_t idx;
+	int32_t smmu_hdl = 0;
+	int32_t num_hdl = 0;
+
+	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
+
+	if (!inp || !out) {
+		CAM_ERR(CAM_MEM, "Invalid params");
+		return -EINVAL;
+	}
+
+	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
+		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
+		inp->flags & CAM_MEM_FLAG_CACHE)) {
+		CAM_ERR(CAM_MEM, "Invalid flags for request mem");
+		return -EINVAL;
+	}
+
+	if (inp->flags & CAM_MEM_FLAG_CACHE)
+		ion_flag |= ION_FLAG_CACHED;
+	else
+		ion_flag &= ~ION_FLAG_CACHED;
+
+	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
+		ION_HEAP(ION_CAMERA_HEAP_ID);
+
+	rc = cam_mem_util_get_dma_buf(inp->size,
+		heap_id,
+		ion_flag,
+		&buf);
+
+	if (rc) {
+		CAM_ERR(CAM_MEM, "ION alloc failed for shared buffer");
+		goto ion_fail;
+	} else {
+		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
+	}
+
+	/*
+	 * KVA is always mapped here, so update the flags to make sure the
+	 * buffer is unmapped properly on release.
+	 */
+	inp->flags |= CAM_MEM_FLAG_KMD_ACCESS;
+	rc = cam_mem_util_map_cpu_va(buf, &kvaddr, &request_len);
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Failed to get kernel vaddr");
+		goto map_fail;
+	}
+
+	if (!inp->smmu_hdl) {
+		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
+		rc = -EINVAL;
+		goto smmu_fail;
+	}
+
+	/* SHARED flag gets precedence, all other flags after it */
+	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
+		region = CAM_SMMU_REGION_SHARED;
+	} else {
+		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
+			region = CAM_SMMU_REGION_IO;
+	}
+
+	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
+		buf,
+		CAM_SMMU_MAP_RW,
+		&iova,
+		&request_len,
+		region);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_MEM, "SMMU mapping failed");
+		goto smmu_fail;
+	}
+
+	smmu_hdl = inp->smmu_hdl;
+	num_hdl = 1;
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].dma_buf = buf;
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].flags = inp->flags;
+	tbl.bufq[idx].buf_handle = mem_handle;
+	tbl.bufq[idx].kmdvaddr = kvaddr;
+
+	tbl.bufq[idx].vaddr = iova;
+
+	tbl.bufq[idx].len = inp->size;
+	tbl.bufq[idx].num_hdl = num_hdl;
+	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+		sizeof(int32_t));
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	out->kva = kvaddr;
+	out->iova = (uint32_t)iova;
+	out->smmu_hdl = smmu_hdl;
+	out->mem_handle = mem_handle;
+	out->len = inp->size;
+	out->region = region;
+
+	return rc;
+slot_fail:
+	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
+		buf, region);
+smmu_fail:
+	cam_mem_util_unmap_cpu_va(buf, kvaddr);
+map_fail:
+	dma_buf_put(buf);
+ion_fail:
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_request_mem);
+
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
+{
+	int32_t idx;
+	int rc;
+
+	if (!inp) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		if (tbl.bufq[idx].vaddr == 0) {
+			CAM_ERR(CAM_MEM, "buffer is released already");
+			return 0;
+		}
+		CAM_ERR(CAM_MEM, "Released buffer state should be active");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+		CAM_ERR(CAM_MEM,
+			"Released buf handle does not match table entry");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_release_mem);
+
+int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
+	enum cam_smmu_region_id region,
+	struct cam_mem_mgr_memory_desc *out)
+{
+	struct dma_buf *buf = NULL;
+	int rc = 0;
+	int ion_fd = -1;
+	uint32_t heap_id;
+	dma_addr_t iova = 0;
+	size_t request_len = 0;
+	uint32_t mem_handle;
+	int32_t idx;
+	int32_t smmu_hdl = 0;
+	int32_t num_hdl = 0;
+
+	if (!inp || !out) {
+		CAM_ERR(CAM_MEM, "Invalid param(s)");
+		return -EINVAL;
+	}
+
+	if (!inp->smmu_hdl) {
+		CAM_ERR(CAM_MEM, "Invalid SMMU handle");
+		return -EINVAL;
+	}
+
+	if (region != CAM_SMMU_REGION_SECHEAP) {
+		CAM_ERR(CAM_MEM, "Only secondary heap supported");
+		return -EINVAL;
+	}
+
+	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
+		ION_HEAP(ION_CAMERA_HEAP_ID);
+	rc = cam_mem_util_get_dma_buf(inp->size,
+		heap_id,
+		0,
+		&buf);
+
+	if (rc) {
+		CAM_ERR(CAM_MEM, "ION alloc failed for sec heap buffer");
+		goto ion_fail;
+	} else {
+		CAM_DBG(CAM_MEM, "Got dma_buf = %pK", buf);
+	}
+
+	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
+		buf,
+		&iova,
+		&request_len);
+
+	if (rc) {
+		CAM_ERR(CAM_MEM, "Reserving secondary heap failed");
+		goto smmu_fail;
+	}
+
+	smmu_hdl = inp->smmu_hdl;
+	num_hdl = 1;
+
+	idx = cam_mem_get_slot();
+	if (idx < 0) {
+		rc = -ENOMEM;
+		goto slot_fail;
+	}
+
+	mutex_lock(&tbl.bufq[idx].q_lock);
+	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
+	tbl.bufq[idx].fd = -1;
+	tbl.bufq[idx].dma_buf = buf;
+	tbl.bufq[idx].flags = inp->flags;
+	tbl.bufq[idx].buf_handle = mem_handle;
+	tbl.bufq[idx].kmdvaddr = 0;
+
+	tbl.bufq[idx].vaddr = iova;
+
+	tbl.bufq[idx].len = request_len;
+	tbl.bufq[idx].num_hdl = num_hdl;
+	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
+		sizeof(int32_t));
+	tbl.bufq[idx].is_imported = false;
+	mutex_unlock(&tbl.bufq[idx].q_lock);
+
+	out->kva = 0;
+	out->iova = (uint32_t)iova;
+	out->smmu_hdl = smmu_hdl;
+	out->mem_handle = mem_handle;
+	out->len = request_len;
+	out->region = region;
+
+	return rc;
+
+slot_fail:
+	cam_smmu_release_sec_heap(smmu_hdl);
+smmu_fail:
+	dma_buf_put(buf);
+ion_fail:
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);
+
+int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
+{
+	int32_t idx;
+	int rc;
+	int32_t smmu_hdl;
+
+	if (!inp) {
+		CAM_ERR(CAM_MEM, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (inp->region != CAM_SMMU_REGION_SECHEAP) {
+		CAM_ERR(CAM_MEM, "Only secondary heap supported");
+		return -EINVAL;
+	}
+
+	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
+	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
+		CAM_ERR(CAM_MEM, "Incorrect index extracted from mem handle");
+		return -EINVAL;
+	}
+
+	if (!tbl.bufq[idx].active) {
+		if (tbl.bufq[idx].vaddr == 0) {
+			CAM_ERR(CAM_MEM, "buffer is released already");
+			return 0;
+		}
+		CAM_ERR(CAM_MEM, "Released buffer state should be active");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
+		CAM_ERR(CAM_MEM,
+			"Released buf handle does not match table entry");
+		return -EINVAL;
+	}
+
+	if (tbl.bufq[idx].num_hdl != 1) {
+		CAM_ERR(CAM_MEM,
+			"Sec heap region should have only one smmu hdl");
+		return -ENODEV;
+	}
+
+	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
+		sizeof(int32_t));
+	if (inp->smmu_hdl != smmu_hdl) {
+		CAM_ERR(CAM_MEM,
+			"Passed SMMU handle doesn't match with internal hdl");
+		return -ENODEV;
+	}
+
+	rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
+	if (rc) {
+		CAM_ERR(CAM_MEM,
+			"Sec heap region release failed");
+		return -ENODEV;
+	}
+
+	CAM_DBG(CAM_MEM, "Releasing hdl = %X", inp->mem_handle);
+	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
+	if (rc)
+		CAM_ERR(CAM_MEM, "unmapping secondary heap failed");
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
new file mode 100644
index 0000000..f30f743
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_MEM_MGR_H_
+#define _CAM_MEM_MGR_H_
+
+#include <linux/mutex.h>
+#include <linux/dma-buf.h>
+#include <media/cam_req_mgr.h>
+#include "cam_mem_mgr_api.h"
+
+#define CAM_MEM_BUFQ_MAX 1024
+
+/* Enum for possible SMMU operations */
+enum cam_smmu_mapping_client {
+	CAM_SMMU_MAPPING_USER,
+	CAM_SMMU_MAPPING_KERNEL,
+};
+
+/**
+ * struct cam_mem_buf_queue
+ *
+ * @dma_buf:     pointer to the allocated dma_buf in the table
+ * @q_lock:      mutex lock for buffer
+ * @hdls:        list of mapped handles
+ * @num_hdl:     number of handles
+ * @fd:          file descriptor of buffer
+ * @buf_handle:  unique handle for buffer
+ * @align:       alignment for allocation
+ * @len:         size of buffer
+ * @flags:       attributes of buffer
+ * @vaddr:       IOVA of buffer
+ * @kmdvaddr:    Kernel virtual address
+ * @active:      state of the buffer
+ * @is_imported: Flag indicating if buffer is imported from an FD in user space
+ */
+struct cam_mem_buf_queue {
+	struct dma_buf *dma_buf;
+	struct mutex q_lock;
+	int32_t hdls[CAM_MEM_MMU_MAX_HANDLE];
+	int32_t num_hdl;
+	int32_t fd;
+	int32_t buf_handle;
+	int32_t align;
+	size_t len;
+	uint32_t flags;
+	uint64_t vaddr;
+	uintptr_t kmdvaddr;
+	bool active;
+	bool is_imported;
+};
+
+/**
+ * struct cam_mem_table
+ *
+ * @m_lock: mutex lock for table
+ * @bitmap: bitmap of the mem mgr utility
+ * @bits: max bits of the utility
+ * @bufq: array of buffers
+ */
+struct cam_mem_table {
+	struct mutex m_lock;
+	void *bitmap;
+	size_t bits;
+	struct cam_mem_buf_queue bufq[CAM_MEM_BUFQ_MAX];
+};
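Editor's note: cam_mem_get_slot() is referenced by the .c hunk above but not shown; the sketch below is a plausible slot reservation over this table using the kernel bitmap helpers from linux/bitops.h. It is an assumption about the implementation, not the actual code.

	/* Sketch only: reserve a free bufq slot by scanning the allocation
	 * bitmap under the table lock. The real cam_mem_get_slot() may differ.
	 */
	static int32_t example_get_slot(struct cam_mem_table *t)
	{
		unsigned long idx;

		mutex_lock(&t->m_lock);
		idx = find_first_zero_bit((unsigned long *)t->bitmap, t->bits);
		if (idx >= t->bits) {
			mutex_unlock(&t->m_lock);
			return -ENOMEM;
		}
		set_bit(idx, (unsigned long *)t->bitmap);
		t->bufq[idx].active = true;
		mutex_unlock(&t->m_lock);
		return (int32_t)idx;
	}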
+
+/**
+ * @brief: Allocates and maps buffer
+ *
+ * @cmd:   Allocation information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd);
+
+/**
+ * @brief: Releases a buffer reference
+ *
+ * @cmd:   Buffer release information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd);
+
+/**
+ * @brief Maps a buffer
+ *
+ * @cmd: Buffer mapping information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd);
+
+/**
+ * @brief: Perform cache ops on the buffer
+ *
+ * @cmd:   Cache ops information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd);
+
+/**
+ * @brief: Initializes the memory manager
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_init(void);
+
+/**
+ * @brief:  Tears down the memory manager
+ *
+ * @return None
+ */
+void cam_mem_mgr_deinit(void);
+
+#endif /* _CAM_MEM_MGR_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
new file mode 100644
index 0000000..404166b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_mem_mgr_api.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_MEM_MGR_API_H_
+#define _CAM_MEM_MGR_API_H_
+
+#include <media/cam_req_mgr.h>
+#include "cam_smmu_api.h"
+
+/**
+ * struct cam_mem_mgr_request_desc
+ *
+ * @size    : Size of memory requested for allocation
+ * @align   : Alignment of requested memory
+ * @smmu_hdl: SMMU handle to identify context bank where memory will be mapped
+ * @flags   : Flags to indicate cached/uncached property
+ */
+struct cam_mem_mgr_request_desc {
+	uint64_t size;
+	uint64_t align;
+	int32_t smmu_hdl;
+	uint32_t flags;
+};
+
+/**
+ * struct cam_mem_mgr_memory_desc
+ *
+ * @kva        : Kernel virtual address of allocated memory
+ * @iova       : IOVA of allocated memory
+ * @smmu_hdl   : SMMU handle of allocated memory
+ * @mem_handle : Mem handle identifying allocated memory
+ * @len        : Length of allocated memory
+ * @region     : Region to which allocated memory belongs
+ */
+struct cam_mem_mgr_memory_desc {
+	uintptr_t kva;
+	uint32_t iova;
+	int32_t smmu_hdl;
+	uint32_t mem_handle;
+	uint64_t len;
+	enum cam_smmu_region_id region;
+};
+
+/**
+ * @brief: Requests a memory buffer
+ *
+ * @inp:   Information specifying requested buffer properties
+ * @out:   Information about allocated buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
+	struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Releases a memory buffer
+ *
+ * @inp:   Information specifying buffer to be released
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp);
+
+/**
+ * @brief: Returns IOVA information about buffer
+ *
+ * @buf_handle: Handle of the buffer
+ * @mmu_handle: SMMU handle where buffer is mapped
+ * @iova_ptr  : Pointer to mmu's iova
+ * @len_ptr   : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
+	uint64_t *iova_ptr, size_t *len_ptr);
+/**
+ * @brief: Returns CPU address information about buffer
+ *
+ * @buf_handle: Handle for the buffer
+ * @vaddr_ptr : pointer to kernel virtual address
+ * @len_ptr   : Length of the buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_get_cpu_buf(int32_t buf_handle, uintptr_t *vaddr_ptr,
+	size_t *len_ptr);
+
+static inline bool cam_mem_is_secure_buf(int32_t buf_handle)
+{
+	return CAM_MEM_MGR_IS_SECURE_HDL(buf_handle);
+}
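Editor's note: a hedged usage sketch for the two lookup helpers declared above; the handles are assumed to come from a prior cam_mem_mgr_request_mem() call.

	/* Hypothetical consumer: translate a mem handle into an IOVA and,
	 * for non-secure kernel-mapped buffers, a CPU address.
	 */
	static int example_lookup(int32_t buf_handle, int32_t mmu_handle)
	{
		uint64_t iova = 0;
		uintptr_t kva = 0;
		size_t len = 0;
		int rc;

		rc = cam_mem_get_io_buf(buf_handle, mmu_handle, &iova, &len);
		if (rc)
			return rc;

		if (!cam_mem_is_secure_buf(buf_handle))
			rc = cam_mem_get_cpu_buf(buf_handle, &kva, &len);

		return rc;
	}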
+
+/**
+ * @brief: Reserves a memory region
+ *
+ * @inp:  Information specifying requested region properties
+ * @region : Region which is to be reserved
+ * @out   : Information about reserved region
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
+		enum cam_smmu_region_id region,
+		struct cam_mem_mgr_memory_desc *out);
+
+/**
+ * @brief: Frees a memory region
+ *
+ * @inp   : Information about region which is to be freed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp);
+
+#endif /* _CAM_MEM_MGR_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
new file mode 100644
index 0000000..6a1ab19
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -0,0 +1,2903 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_workq.h"
+#include "cam_req_mgr_debug.h"
+#include "cam_trace.h"
+#include "cam_debug_util.h"
+#include "cam_req_mgr_dev.h"
+
+static struct cam_req_mgr_core_device *g_crm_core_dev;
+
+void cam_req_mgr_handle_core_shutdown(void)
+{
+	struct cam_req_mgr_core_session *session;
+	struct cam_req_mgr_core_session *tsession;
+	struct cam_req_mgr_session_info ses_info;
+
+	if (!list_empty(&g_crm_core_dev->session_head)) {
+		list_for_each_entry_safe(session, tsession,
+			&g_crm_core_dev->session_head, entry) {
+			ses_info.session_hdl =
+				session->session_hdl;
+			cam_req_mgr_destroy_session(&ses_info);
+		}
+	}
+}
+
+static int __cam_req_mgr_setup_payload(struct cam_req_mgr_core_workq *workq)
+{
+	int32_t                  i = 0;
+	int                      rc = 0;
+	struct crm_task_payload *task_data = NULL;
+
+	task_data = kcalloc(
+		workq->task.num_task, sizeof(*task_data),
+		GFP_KERNEL);
+	if (!task_data) {
+		rc = -ENOMEM;
+	} else {
+		for (i = 0; i < workq->task.num_task; i++)
+			workq->task.pool[i].payload = &task_data[i];
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_find_pd_tbl()
+ *
+ * @brief    : Find pipeline delay based table pointer which matches delay
+ * @tbl      : Pointer to list of request table
+ * @delay    : Pipeline delay value to be searched for comparison
+ *
+ * @return   : pointer to the request table matching the given pipeline delay
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_find_pd_tbl(
+	struct cam_req_mgr_req_tbl *tbl, int32_t delay)
+{
+	if (!tbl)
+		return NULL;
+
+	do {
+		if (delay != tbl->pd)
+			tbl = tbl->next;
+		else
+			return tbl;
+	} while (tbl != NULL);
+
+	return NULL;
+}
+
+/**
+ * __cam_req_mgr_inc_idx()
+ *
+ * @brief    : Increment val passed by step size and rollover after max_val
+ * @val      : value to be incremented
+ * @step     : amount/step by which val is incremented
+ * @max_val  : max val after which idx will roll over
+ *
+ */
+static void __cam_req_mgr_inc_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = (*val + step) % max_val;
+}
+
+/**
+ * __cam_req_mgr_dec_idx()
+ *
+ * @brief    : Decrement val passed by step size and rollover after max_val
+ * @val      : value to be decremented
+ * @step     : amount/step by which val is decremented
+ * @max_val  : values below zero roll over, counting back from max_val
+ *
+ */
+static void __cam_req_mgr_dec_idx(int32_t *val, int32_t step, int32_t max_val)
+{
+	*val = *val - step;
+	if (*val < 0)
+		*val = max_val + (*val);
+}
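Editor's note: both helpers are plain modular arithmetic over the slot ring; a standalone illustration of the wraparound behaviour:

	#include <stdio.h>

	static void inc_idx(int *val, int step, int max) { *val = (*val + step) % max; }

	static void dec_idx(int *val, int step, int max)
	{
		*val -= step;
		if (*val < 0)
			*val += max;
	}

	int main(void)
	{
		int idx = 1;

		dec_idx(&idx, 2, 8);	/* 1 - 2 wraps to 7 in an 8-slot ring */
		printf("%d\n", idx);	/* 7 */
		inc_idx(&idx, 1, 8);	/* 7 + 1 rolls over to 0 */
		printf("%d\n", idx);	/* 0 */
		return 0;
	}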
+
+/**
+ * __cam_req_mgr_validate_inject_delay()
+ *
+ * @brief    : Check if any pd device is introducing inject delay
+ * @tbl      : cam_req_mgr_req_tbl
+ * @curr_idx : slot idx
+ *
+ * @return   : 0 for success, negative for failure
+ */
+static int __cam_req_mgr_validate_inject_delay(
+	struct cam_req_mgr_req_tbl  *tbl,
+	int32_t curr_idx)
+{
+	struct cam_req_mgr_tbl_slot *slot = NULL;
+
+	while (tbl) {
+		slot = &tbl->slot[curr_idx];
+		if (slot->inject_delay > 0) {
+			slot->inject_delay--;
+			return -EAGAIN;
+		}
+		__cam_req_mgr_dec_idx(&curr_idx, tbl->pd_delta,
+			tbl->num_slots);
+		tbl = tbl->next;
+	}
+	return 0;
+}
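Editor's note: each pending frame decrements inject_delay, and the request is held back (-EAGAIN) until the counter drains. A standalone illustration of that drain:

	#include <stdio.h>

	static int validate(int *inject_delay)
	{
		if (*inject_delay > 0) {
			(*inject_delay)--;
			return -1;	/* stand-in for -EAGAIN */
		}
		return 0;
	}

	int main(void)
	{
		int delay = 2, frame;

		for (frame = 0; frame < 4; frame++)
			printf("frame %d -> %s\n", frame,
				validate(&delay) ? "hold" : "apply");
		/* frames 0 and 1 hold, frames 2 and 3 apply */
		return 0;
	}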
+
+/**
+ * __cam_req_mgr_traverse()
+ *
+ * @brief    : Traverse through the pd tables, internally covering all linked
+ *             pd tables. Each pd table visited checks whether the idx passed
+ *             to it is in ready state; ready means all devices linked to that
+ *             pd table have a packet ready for this request id. It then calls
+ *             the subsequent pd tbl with a new idx, which takes into account
+ *             the delta between the current pd table and the next one.
+ * @traverse_data: contains all the info to traverse through pd tables
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_traverse(struct cam_req_mgr_traverse *traverse_data)
+{
+	int                          rc = 0;
+	int32_t                      next_idx = traverse_data->idx;
+	int32_t                      curr_idx = traverse_data->idx;
+	struct cam_req_mgr_req_tbl  *tbl;
+	struct cam_req_mgr_apply    *apply_data;
+	struct cam_req_mgr_tbl_slot *slot = NULL;
+
+	if (!traverse_data->tbl || !traverse_data->apply_data) {
+		CAM_ERR(CAM_CRM, "NULL pointer %pK %pK",
+			traverse_data->tbl, traverse_data->apply_data);
+		traverse_data->result = 0;
+		return -EINVAL;
+	}
+
+	tbl = traverse_data->tbl;
+	apply_data = traverse_data->apply_data;
+	slot = &tbl->slot[curr_idx];
+	CAM_DBG(CAM_CRM,
+		"Enter pd %d idx %d state %d skip %d status %d skip_idx %d",
+		tbl->pd, curr_idx, tbl->slot[curr_idx].state,
+		tbl->skip_traverse, traverse_data->in_q->slot[curr_idx].status,
+		traverse_data->in_q->slot[curr_idx].skip_idx);
+
+	if ((traverse_data->self_link == true) &&
+		(!traverse_data->inject_delay_chk)) {
+		rc = __cam_req_mgr_validate_inject_delay(tbl, curr_idx);
+		if (rc) {
+			CAM_DBG(CAM_CRM, "Injecting Delay of one frame");
+			apply_data[tbl->pd].req_id = -1;
+			/* This pd tbl not ready to proceed with asked idx */
+			SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+			return -EAGAIN;
+		}
+		traverse_data->inject_delay_chk = true;
+	}
+
+	/* Check if req is ready or in skip mode or pd tbl is in skip mode */
+	if (tbl->slot[curr_idx].state == CRM_REQ_STATE_READY ||
+		traverse_data->in_q->slot[curr_idx].skip_idx == 1 ||
+		tbl->skip_traverse > 0) {
+		if (tbl->next) {
+			__cam_req_mgr_dec_idx(&next_idx, tbl->pd_delta,
+				tbl->num_slots);
+			traverse_data->idx = next_idx;
+			traverse_data->tbl = tbl->next;
+			rc = __cam_req_mgr_traverse(traverse_data);
+		}
+		if (rc >= 0) {
+			SET_SUCCESS_BIT(traverse_data->result, tbl->pd);
+
+			if (traverse_data->validate_only == false) {
+				apply_data[tbl->pd].pd = tbl->pd;
+				apply_data[tbl->pd].req_id =
+					CRM_GET_REQ_ID(
+					traverse_data->in_q, curr_idx);
+				apply_data[tbl->pd].idx = curr_idx;
+
+				CAM_DBG(CAM_CRM, "req_id: %lld with pd of %d",
+				apply_data[tbl->pd].req_id,
+				apply_data[tbl->pd].pd);
+				/*
+				 * If traverse is successful decrement
+				 * traverse skip
+				 */
+				if (tbl->skip_traverse > 0) {
+					apply_data[tbl->pd].req_id = -1;
+					tbl->skip_traverse--;
+				}
+			}
+		} else {
+			/* linked pd table is not ready for this traverse yet */
+			return rc;
+		}
+	} else {
+		/* This pd table is not ready to proceed with asked idx */
+		CAM_INFO(CAM_CRM,
+			"Skip Frame: req: %lld not ready pd: %d open_req count: %d",
+			CRM_GET_REQ_ID(traverse_data->in_q, curr_idx),
+			tbl->pd,
+			traverse_data->open_req_cnt);
+		SET_FAILURE_BIT(traverse_data->result, tbl->pd);
+		return -EAGAIN;
+	}
+	return 0;
+}
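Editor's note: the readiness bookkeeping is a per-pd bitmask; each table sets its bit in result on success and the caller later compares result against link->pd_mask. The SET_SUCCESS_BIT definition below is an assumption mirroring its apparent use, for illustration only:

	#include <stdint.h>
	#include <stdio.h>

	#define SET_SUCCESS_BIT(ret, pd)	((ret) |= (1u << (pd)))

	int main(void)
	{
		uint32_t result = 0;
		uint32_t pd_mask = (1u << 2) | (1u << 0);	/* devices at pd 2 and 0 */

		SET_SUCCESS_BIT(result, 2);
		printf("ready=%d\n", result == pd_mask);	/* 0: pd 0 table missing */
		SET_SUCCESS_BIT(result, 0);
		printf("ready=%d\n", result == pd_mask);	/* 1: all tables ready */
		return 0;
	}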
+
+/**
+ * __cam_req_mgr_in_q_skip_idx()
+ *
+ * @brief    : Mark an input queue slot to be skipped: sets the slot's
+ *             skip_idx bit so that when traverse happens for this idx,
+ *             no req will be submitted to the devices handling it.
+ * @in_q     : input queue pointer
+ * @idx      : index of the slot to be skipped
+ *
+ */
+static void __cam_req_mgr_in_q_skip_idx(struct cam_req_mgr_req_queue *in_q,
+	int32_t idx)
+{
+	in_q->slot[idx].req_id = -1;
+	in_q->slot[idx].skip_idx = 1;
+	in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+	CAM_DBG(CAM_CRM, "SET IDX SKIP on slot= %d", idx);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_id()
+ *
+ * @brief    : Set unique id to table
+ * @tbl      : pipeline based table which requires new id
+ * @req      : pointer to request data which contains num_tables counter
+ *
+ */
+static void __cam_req_mgr_tbl_set_id(struct cam_req_mgr_req_tbl *tbl,
+	struct cam_req_mgr_req_data *req)
+{
+	if (!tbl)
+		return;
+	do {
+		tbl->id = req->num_tbl++;
+		CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_tbl_set_all_skip_cnt()
+ *
+ * @brief    : Each pd table sets its skip value based on the delta between
+ *             itself and the max pd value. This is used during initial
+ *             streamon or in the bubble case, so that each pd table skips
+ *             the required number of traversals and aligns itself with the
+ *             devices connected to req mgr.
+ * @l_tbl    : iterates through list of pd tables and sets skip traverse
+ *
+ */
+static void __cam_req_mgr_tbl_set_all_skip_cnt(
+	struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl = *l_tbl;
+	int32_t                     max_pd;
+
+	if (!tbl)
+		return;
+
+	max_pd = tbl->pd;
+	do {
+		tbl->skip_traverse = max_pd - tbl->pd;
+		CAM_DBG(CAM_CRM, "%d: pd %d skip_traverse %d delta %d",
+			tbl->id, tbl->pd, tbl->skip_traverse,
+			tbl->pd_delta);
+		tbl = tbl->next;
+	} while (tbl != NULL);
+}
+
+/**
+ * __cam_req_mgr_reset_req_slot()
+ *
+ * @brief    : reset specified idx/slot in input queue as well as all pd tables
+ * @link     : link pointer
+ * @idx      : slot index which will be reset
+ *
+ */
+static void __cam_req_mgr_reset_req_slot(struct cam_req_mgr_core_link *link,
+	int32_t idx)
+{
+	struct cam_req_mgr_slot      *slot;
+	struct cam_req_mgr_req_tbl   *tbl = link->req.l_tbl;
+	struct cam_req_mgr_req_queue *in_q = link->req.in_q;
+
+	slot = &in_q->slot[idx];
+	CAM_DBG(CAM_CRM, "RESET: idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if CSL has already pushed new request */
+	if (slot->status == CRM_SLOT_STATUS_REQ_ADDED)
+		return;
+
+	/* Reset input queue slot */
+	slot->req_id = -1;
+	slot->skip_idx = 0;
+	slot->recover = 0;
+	slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+	slot->status = CRM_SLOT_STATUS_NO_REQ;
+
+	/* Reset all pd table slot */
+	while (tbl != NULL) {
+		CAM_DBG(CAM_CRM, "pd: %d: idx %d state %d",
+			tbl->pd, idx, tbl->slot[idx].state);
+		tbl->slot[idx].req_ready_map = 0;
+		tbl->slot[idx].state = CRM_REQ_STATE_EMPTY;
+		tbl = tbl->next;
+	}
+}
+
+/**
+ * __cam_req_mgr_check_next_req_slot()
+ *
+ * @brief    : While streaming, if the input queue does not contain any
+ *             pending request, req mgr still needs to submit request ids to
+ *             devices with lower pipeline delay values.
+ * @in_q     : Pointer to the input queue that req mgr will peek into
+ *
+ */
+static void __cam_req_mgr_check_next_req_slot(
+	struct cam_req_mgr_req_queue *in_q)
+{
+	int32_t                  idx = in_q->rd_idx;
+	struct cam_req_mgr_slot *slot;
+
+	__cam_req_mgr_inc_idx(&idx, 1, in_q->num_slots);
+	slot = &in_q->slot[idx];
+
+	CAM_DBG(CAM_CRM, "idx: %d: slot->status %d", idx, slot->status);
+
+	/* Check if there is new req from CSL, if not complete req */
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		if (in_q->wr_idx != idx)
+			CAM_WARN(CAM_CRM,
+				"CHECK here wr %d, rd %d", in_q->wr_idx, idx);
+		__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	}
+}
+
+/**
+ * __cam_req_mgr_send_req()
+ *
+ * @brief    : send request id to be applied to each device connected on link
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @in_q     : pointer to input request queue
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_send_req(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_req_queue *in_q, uint32_t trigger)
+{
+	int                                  rc = 0, pd, i, idx;
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_apply_request     apply_req;
+	struct cam_req_mgr_link_evt_data     evt_data;
+	struct cam_req_mgr_tbl_slot          *slot = NULL;
+
+	apply_req.link_hdl = link->link_hdl;
+	apply_req.report_if_bubble = 0;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (!dev)
+			continue;
+		pd = dev->dev_info.p_delay;
+		if (pd >= CAM_PIPELINE_DELAY_MAX) {
+			CAM_WARN(CAM_CRM, "pd %d greater than max",
+				pd);
+			continue;
+		}
+
+		idx = link->req.apply_data[pd].idx;
+		slot = &dev->pd_tbl->slot[idx];
+		/*
+		 * Just let flash go for this request and other
+		 * device get restricted
+		 */
+
+		if ((slot->skip_next_frame != true) ||
+			(slot->dev_hdl != dev->dev_hdl))
+			continue;
+
+		if (!(dev->dev_info.trigger & trigger))
+			continue;
+
+		apply_req.dev_hdl = dev->dev_hdl;
+		apply_req.request_id =
+			link->req.apply_data[pd].req_id;
+		apply_req.trigger_point = trigger;
+		if (dev->ops && dev->ops->apply_req) {
+			rc = dev->ops->apply_req(&apply_req);
+			if (rc)
+				return rc;
+			CAM_DBG(CAM_REQ,
+				"SEND: link_hdl: %x pd: %d req_id %lld",
+				link->link_hdl, pd, apply_req.request_id);
+			slot->skip_next_frame = false;
+			slot->is_applied = true;
+			return -EAGAIN;
+		}
+	}
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev) {
+			pd = dev->dev_info.p_delay;
+			if (pd >= CAM_PIPELINE_DELAY_MAX) {
+				CAM_WARN(CAM_CRM, "pd %d greater than max",
+					pd);
+				continue;
+			}
+			if (link->req.apply_data[pd].skip_idx ||
+				link->req.apply_data[pd].req_id < 0) {
+				CAM_DBG(CAM_CRM, "skip %d req_id %lld",
+					link->req.apply_data[pd].skip_idx,
+					link->req.apply_data[pd].req_id);
+				continue;
+			}
+			if (!(dev->dev_info.trigger & trigger))
+				continue;
+
+			apply_req.dev_hdl = dev->dev_hdl;
+			apply_req.request_id =
+				link->req.apply_data[pd].req_id;
+			idx = link->req.apply_data[pd].idx;
+			slot = &dev->pd_tbl->slot[idx];
+			apply_req.report_if_bubble =
+				in_q->slot[idx].recover;
+
+			if ((slot->dev_hdl == dev->dev_hdl) &&
+				(slot->is_applied == true)) {
+				slot->is_applied = false;
+				continue;
+			}
+
+			trace_cam_req_mgr_apply_request(link, &apply_req, dev);
+
+			apply_req.trigger_point = trigger;
+			CAM_DBG(CAM_REQ,
+				"SEND: link_hdl: %x pd %d req_id %lld",
+				link->link_hdl, pd, apply_req.request_id);
+			if (dev->ops && dev->ops->apply_req) {
+				rc = dev->ops->apply_req(&apply_req);
+				if (rc < 0)
+					break;
+
+				if (pd == link->max_delay)
+					link->open_req_cnt--;
+			}
+		}
+	}
+	if (rc < 0) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "APPLY FAILED pd %d req_id %lld",
+			dev->dev_info.p_delay, apply_req.request_id);
+		/* Apply req failed notify already applied devs */
+		for (; i >= 0; i--) {
+			dev = &link->l_dev[i];
+			evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_ERR;
+			evt_data.dev_hdl = dev->dev_hdl;
+			evt_data.link_hdl =  link->link_hdl;
+			evt_data.req_id = apply_req.request_id;
+			evt_data.u.error = CRM_KMD_ERR_BUBBLE;
+			if (dev->ops && dev->ops->process_evt)
+				dev->ops->process_evt(&evt_data);
+		}
+	}
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_check_link_is_ready()
+ *
+ * @brief    : traverse through all request tables and see if all devices are
+ *             ready to apply request settings.
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @idx      : index within input request queue
+ * @validate_only : Whether to only validate, without updating apply settings
+ * @self_link : To indicate whether the validation is for the given link or
+ *              other sync link
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_check_link_is_ready(struct cam_req_mgr_core_link *link,
+	int32_t idx, bool validate_only, bool self_link)
+{
+	int                            rc;
+	struct cam_req_mgr_traverse    traverse_data;
+	struct cam_req_mgr_req_queue  *in_q;
+	struct cam_req_mgr_apply      *apply_data;
+
+	in_q = link->req.in_q;
+
+	apply_data = link->req.apply_data;
+
+	if (validate_only == false) {
+		memset(apply_data, 0,
+		    sizeof(struct cam_req_mgr_apply) * CAM_PIPELINE_DELAY_MAX);
+	}
+
+	traverse_data.apply_data = apply_data;
+	traverse_data.idx = idx;
+	traverse_data.tbl = link->req.l_tbl;
+	traverse_data.in_q = in_q;
+	traverse_data.result = 0;
+	traverse_data.validate_only = validate_only;
+	traverse_data.self_link = self_link;
+	traverse_data.inject_delay_chk = false;
+	traverse_data.open_req_cnt = link->open_req_cnt;
+	/*
+	 *  Traverse through all pd tables, if result is success,
+	 *  apply the settings
+	 */
+
+	rc = __cam_req_mgr_traverse(&traverse_data);
+	CAM_DBG(CAM_CRM,
+		"SOF: idx %d self_link %d validate %d result %x pd_mask %x rc %d",
+		idx, traverse_data.self_link, traverse_data.validate_only,
+		traverse_data.result, link->pd_mask, rc);
+
+	if (!rc && traverse_data.result == link->pd_mask) {
+		CAM_DBG(CAM_CRM,
+			"READY: link_hdl= %x idx= %d, req_id= %lld :%lld :%lld",
+			link->link_hdl, idx,
+			apply_data[2].req_id,
+			apply_data[1].req_id,
+			apply_data[0].req_id);
+	} else
+		rc = -EAGAIN;
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_find_slot_for_req()
+ *
+ * @brief    : Find idx from input queue at which req id is enqueued
+ * @in_q     : input request queue pointer
+ * @req_id   : request id which needs to be searched in input queue
+ *
+ * @return   : slot index where passed request id is stored, -1 for failure
+ *
+ */
+static int32_t __cam_req_mgr_find_slot_for_req(
+	struct cam_req_mgr_req_queue *in_q, int64_t req_id)
+{
+	int32_t                   idx, i;
+	struct cam_req_mgr_slot  *slot;
+
+	idx = in_q->rd_idx;
+	for (i = 0; i < in_q->num_slots; i++) {
+		slot = &in_q->slot[idx];
+		if (slot->req_id == req_id) {
+			CAM_DBG(CAM_CRM,
+				"req: %lld found at idx: %d status: %d sync_mode: %d",
+				req_id, idx, slot->status, slot->sync_mode);
+			break;
+		}
+		__cam_req_mgr_dec_idx(&idx, 1, in_q->num_slots);
+	}
+	if (i >= in_q->num_slots)
+		idx = -1;
+
+	return idx;
+}
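Editor's note: the search walks backwards from rd_idx, so the most recently applied slots are checked first, and gives up after covering all num_slots entries. A standalone sketch of that ring search:

	#include <stdio.h>

	static int find_slot(const long long *req, int num, int rd_idx, long long id)
	{
		int idx = rd_idx, i;

		for (i = 0; i < num; i++) {
			if (req[idx] == id)
				return idx;
			idx = idx ? idx - 1 : num - 1;	/* decrement with wrap */
		}
		return -1;
	}

	int main(void)
	{
		long long req[4] = { 10, 11, 12, -1 };

		printf("%d\n", find_slot(req, 4, 2, 10));	/* prints 0 */
		printf("%d\n", find_slot(req, 4, 2, 99));	/* prints -1 */
		return 0;
	}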
+
+/**
+ * __cam_req_mgr_reset_sof_cnt()
+ *
+ * @brief    : the sof_counter for both links is reset
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ */
+static void __cam_req_mgr_reset_sof_cnt(
+	struct cam_req_mgr_core_link *link)
+{
+	link->sof_counter = -1;
+	link->sync_link->sof_counter = -1;
+	link->frame_skip_flag = false;
+
+	CAM_DBG(CAM_CRM,
+		"link_hdl %x self_counter %lld other_counter %lld frame_skip_lag %d",
+		link->link_hdl, link->sof_counter,
+		link->sync_link->sof_counter, link->frame_skip_flag);
+}
+
+/**
+ * __cam_req_mgr_sof_cnt_initialize()
+ *
+ * @brief    : when the sof count is initially -1, increments the count
+ *             and computes the sync_self_ref for this link
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ */
+static void __cam_req_mgr_sof_cnt_initialize(
+	struct cam_req_mgr_core_link *link)
+{
+	link->sof_counter++;
+	link->sync_self_ref = link->sof_counter -
+		link->sync_link->sof_counter;
+
+	CAM_DBG(CAM_CRM,
+		"link_hdl %x self_counter %lld other_counter %lld",
+		link->link_hdl, link->sof_counter,
+		link->sync_link->sof_counter);
+}
+
+/**
+ * __cam_req_mgr_wrap_sof_cnt()
+ *
+ * @brief    : once the sof count reaches a predefined maximum
+ *             the count needs to be wrapped back starting from 0
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ */
+static void __cam_req_mgr_wrap_sof_cnt(
+	struct cam_req_mgr_core_link *link)
+{
+	link->sof_counter = (MAX_SYNC_COUNT -
+		(link->sync_link->sof_counter));
+	link->sync_link->sof_counter = 0;
+
+	CAM_DBG(CAM_CRM,
+		"link_hdl %x self_counter %lld sync_link_hdl %x other_counter %lld",
+		link->link_hdl, link->sof_counter,
+		link->sync_link->link_hdl, link->sync_link->sof_counter);
+}
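Editor's note: the wrap preserves the counter difference. If self has reached MAX_SYNC_COUNT and the other link sits at MAX_SYNC_COUNT - d, the new values d and 0 keep sync_diff equal to d. A standalone check (the MAX_SYNC_COUNT value here is assumed; the real one lives in the core header):

	#include <stdio.h>

	#define MAX_SYNC_COUNT 65535	/* assumed value, for illustration */

	int main(void)
	{
		long long self = MAX_SYNC_COUNT, other = MAX_SYNC_COUNT - 3;
		long long diff_before = self - other;

		self = MAX_SYNC_COUNT - other;	/* wrap, as in __cam_req_mgr_wrap_sof_cnt() */
		other = 0;
		printf("%lld %lld\n", diff_before, self - other);	/* prints: 3 3 */
		return 0;
	}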
+
+/**
+ * __cam_req_mgr_validate_sof_cnt()
+ *
+ * @brief     : validates sof count difference for a given link
+ * @link      : pointer to link whose input queue and req tbl are
+ *              traversed through
+ * @sync_link : pointer to the sync link
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_validate_sof_cnt(
+	struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_core_link *sync_link)
+{
+	int64_t sync_diff = 0;
+	int rc = 0;
+
+	if (link->sof_counter == MAX_SYNC_COUNT)
+		__cam_req_mgr_wrap_sof_cnt(link);
+
+	sync_diff = link->sof_counter - sync_link->sof_counter;
+
+	CAM_DBG(CAM_CRM,
+		"link[%x] self_counter=%lld other_counter=%lld diff=%lld sync_self_ref=%lld",
+		link->link_hdl, link->sof_counter,
+		sync_link->sof_counter, sync_diff, link->sync_self_ref);
+
+	if (sync_diff > SYNC_LINK_SOF_CNT_MAX_LMT) {
+		link->sync_link->frame_skip_flag = true;
+		CAM_WARN(CAM_CRM,
+			"Detected anomaly, skip link_hdl %x self_counter=%lld other_counter=%lld sync_self_ref=%lld",
+			link->link_hdl, link->sof_counter,
+			sync_link->sof_counter, link->sync_self_ref);
+		rc = -EPERM;
+	}
+
+	return rc;
+}
+
+
+/**
+ * __cam_req_mgr_process_sync_req()
+ *
+ * @brief    : processes requests during sync mode
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ * @slot     : pointer to the current slot being processed
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_sync_req(
+	struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_slot *slot)
+{
+	struct cam_req_mgr_core_link *sync_link = NULL;
+	int64_t req_id = 0;
+	int sync_slot_idx = 0, rc = 0;
+
+	if (!link->sync_link) {
+		CAM_ERR(CAM_CRM, "Sync link null");
+		return -EINVAL;
+	}
+
+	sync_link = link->sync_link;
+	req_id = slot->req_id;
+
+	CAM_DBG(CAM_REQ,
+		"link_hdl %x req %lld sync_self_ref %lld sof_counter %lld frame_skip_flag %d sync_link_self_ref %lld",
+		link->link_hdl, req_id, link->sync_self_ref, link->sof_counter,
+		link->frame_skip_flag, link->sync_link->sync_self_ref);
+
+	if (sync_link->sync_link_sof_skip) {
+		CAM_DBG(CAM_REQ,
+			"No req applied on corresponding SOF on sync link: %x",
+			sync_link->link_hdl);
+		sync_link->sync_link_sof_skip = false;
+		/* Compensate for the injected delay of each pd */
+		__cam_req_mgr_check_link_is_ready(link, slot->idx, true, true);
+		return -EINVAL;
+	}
+
+	if (link->sof_counter == -1) {
+		__cam_req_mgr_sof_cnt_initialize(link);
+	} else if ((link->frame_skip_flag) &&
+		(sync_link->sync_self_ref != -1)) {
+		CAM_DBG(CAM_REQ, "Link[%x] Req[%lld] Resetting values ",
+			link->link_hdl, req_id);
+		__cam_req_mgr_reset_sof_cnt(link);
+		__cam_req_mgr_sof_cnt_initialize(link);
+	} else {
+		link->sof_counter++;
+	}
+
+	rc = __cam_req_mgr_check_link_is_ready(link, slot->idx, true, true);
+	if (rc) {
+		CAM_DBG(CAM_REQ,
+			"Req: %lld [My link] not ready on link: %x, rc=%d",
+			req_id, link->link_hdl, rc);
+		link->sync_link_sof_skip = true;
+		goto failure;
+	}
+
+	sync_slot_idx = __cam_req_mgr_find_slot_for_req(
+		sync_link->req.in_q, req_id);
+
+	if (sync_slot_idx != -1) {
+		rc = __cam_req_mgr_check_link_is_ready(
+			sync_link, sync_slot_idx, true, false);
+		CAM_DBG(CAM_CRM, "sync_slot_idx=%d, status=%d, rc=%d",
+			sync_slot_idx,
+			sync_link->req.in_q->slot[sync_slot_idx].status,
+			rc);
+	} else {
+		CAM_DBG(CAM_CRM, "sync_slot_idx=%d, rc=%d",
+			sync_slot_idx, rc);
+	}
+
+	if ((sync_slot_idx != -1) &&
+		((sync_link->req.in_q->slot[sync_slot_idx].status ==
+		CRM_SLOT_STATUS_REQ_APPLIED) || (rc == 0))) {
+		rc = __cam_req_mgr_validate_sof_cnt(link, sync_link);
+		if (rc) {
+			CAM_DBG(CAM_CRM,
+				"Req: %lld validate failed: %x",
+				req_id, sync_link->link_hdl);
+			goto failure;
+		}
+
+		CAM_DBG(CAM_REQ,
+			"Req: %lld ready to apply on link: %x [validation successful]",
+			req_id, link->link_hdl);
+		/*
+		 *  At this point all validation is successfully done
+		 *  and we can proceed to apply the given request.
+		 *  Ideally the next call should return success.
+		 */
+		rc = __cam_req_mgr_check_link_is_ready(link,
+			slot->idx, false, true);
+
+		if (rc)
+			CAM_WARN(CAM_CRM, "Unexpected return value rc: %d", rc);
+	} else {
+		CAM_DBG(CAM_REQ,
+			"Req: %lld [Other link] not ready to apply on link: %x",
+			req_id, sync_link->link_hdl);
+		rc = -EPERM;
+		link->sync_link_sof_skip = true;
+		goto failure;
+	}
+
+	return rc;
+
+failure:
+	link->sof_counter--;
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_process_req()
+ *
+ * @brief    : processes read index in request queue and traverse through table
+ * @link     : pointer to link whose input queue and req tbl are
+ *             traversed through
+ *
+ * @return   : 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_process_req(struct cam_req_mgr_core_link *link,
+	uint32_t trigger)
+{
+	int                                  rc = 0, idx;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_req_queue        *in_q;
+	struct cam_req_mgr_core_session     *session;
+
+	in_q = link->req.in_q;
+	session = (struct cam_req_mgr_core_session *)link->parent;
+	mutex_lock(&session->lock);
+	/*
+	 * Check if new read index,
+	 * - if in pending  state, traverse again to complete
+	 *    transaction of this read index.
+	 * - if in applied state, something is wrong.
+	 * - if in no_req state, no new req
+	 */
+	CAM_DBG(CAM_REQ, "SOF Req[%lld] idx %d req_status %d link_hdl %x",
+		in_q->slot[in_q->rd_idx].req_id, in_q->rd_idx,
+		in_q->slot[in_q->rd_idx].status, link->link_hdl);
+
+	slot = &in_q->slot[in_q->rd_idx];
+	if (slot->status == CRM_SLOT_STATUS_NO_REQ) {
+		CAM_DBG(CAM_CRM, "No Pending req");
+		rc = 0;
+		goto error;
+	}
+
+	if ((trigger != CAM_TRIGGER_POINT_SOF) &&
+		(trigger != CAM_TRIGGER_POINT_EOF))
+		goto error;
+
+	if ((trigger == CAM_TRIGGER_POINT_EOF) &&
+		(!(link->trigger_mask & CAM_TRIGGER_POINT_SOF))) {
+		CAM_DBG(CAM_CRM, "Applying for last SOF fails");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (trigger == CAM_TRIGGER_POINT_SOF) {
+		if (link->trigger_mask) {
+			CAM_ERR_RATE_LIMIT(CAM_CRM,
+				"Applying for last EOF fails");
+			rc = -EINVAL;
+			goto error;
+		}
+
+		if (slot->sync_mode == CAM_REQ_MGR_SYNC_MODE_SYNC)
+			rc = __cam_req_mgr_process_sync_req(link, slot);
+		else
+			rc = __cam_req_mgr_check_link_is_ready(link,
+				slot->idx, false, true);
+
+		if (rc < 0) {
+			/*
+			 * If traverse result is not success, then some devices
+			 * are not ready with packet for the asked request id,
+			 * hence try again in next sof
+			 */
+			slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+			spin_lock_bh(&link->link_state_spin_lock);
+			if (link->state == CAM_CRM_LINK_STATE_ERR) {
+				/*
+				 * During error recovery all tables should be
+				 * ready, don't expect to enter here.
+				 * @TODO: gracefully handle if recovery fails.
+				 */
+				CAM_ERR_RATE_LIMIT(CAM_CRM,
+					"FATAL recovery cant finish idx %d status %d",
+					in_q->rd_idx,
+					in_q->slot[in_q->rd_idx].status);
+				rc = -EPERM;
+			}
+			spin_unlock_bh(&link->link_state_spin_lock);
+			goto error;
+		}
+	}
+
+	rc = __cam_req_mgr_send_req(link, link->req.in_q, trigger);
+	if (rc < 0) {
+		/* Apply req failed retry at next sof */
+		slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+	} else {
+		link->trigger_mask |= trigger;
+
+		CAM_DBG(CAM_CRM, "Applied req[%lld] on link[%x] success",
+			slot->req_id, link->link_hdl);
+		spin_lock_bh(&link->link_state_spin_lock);
+		if (link->state == CAM_CRM_LINK_STATE_ERR) {
+			CAM_WARN(CAM_CRM, "Err recovery done idx %d",
+				in_q->rd_idx);
+			link->state = CAM_CRM_LINK_STATE_READY;
+		}
+		spin_unlock_bh(&link->link_state_spin_lock);
+
+		if (link->sync_link_sof_skip)
+			link->sync_link_sof_skip = false;
+
+		if (link->trigger_mask == link->subscribe_event) {
+			slot->status = CRM_SLOT_STATUS_REQ_APPLIED;
+			link->trigger_mask = 0;
+			CAM_DBG(CAM_CRM, "req %d is applied on link %x",
+				slot->req_id,
+				link->link_hdl);
+			idx = in_q->rd_idx;
+			__cam_req_mgr_dec_idx(
+				&idx, link->max_delay + 1,
+				in_q->num_slots);
+			__cam_req_mgr_reset_req_slot(link, idx);
+		}
+	}
+
+	mutex_unlock(&session->lock);
+	return rc;
+error:
+	mutex_unlock(&session->lock);
+	return rc;
+}
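Editor's note: a request is marked applied only once every subscribed trigger point has fired; the mask accumulates per trigger and resets on completion. Illustration below (the single-bit trigger values are an assumption mirroring the UAPI header):

	#include <stdio.h>

	#define CAM_TRIGGER_POINT_SOF (1 << 0)	/* assumed bit values */
	#define CAM_TRIGGER_POINT_EOF (1 << 1)

	int main(void)
	{
		unsigned int subscribe_event =
			CAM_TRIGGER_POINT_SOF | CAM_TRIGGER_POINT_EOF;
		unsigned int trigger_mask = 0;

		trigger_mask |= CAM_TRIGGER_POINT_SOF;
		printf("applied=%d\n", trigger_mask == subscribe_event);	/* 0 */
		trigger_mask |= CAM_TRIGGER_POINT_EOF;
		printf("applied=%d\n", trigger_mask == subscribe_event);	/* 1 */
		return 0;
	}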
+
+/**
+ * __cam_req_mgr_add_tbl_to_link()
+ *
+ * @brief    : Add table to the list under link, sorted in decreasing pd order
+ * @l_tbl    : list of pipeline delay tables.
+ * @new_tbl  : new tbl which will be appended to above list as per its pd value
+ *
+ */
+static void __cam_req_mgr_add_tbl_to_link(struct cam_req_mgr_req_tbl **l_tbl,
+	struct cam_req_mgr_req_tbl *new_tbl)
+{
+	struct cam_req_mgr_req_tbl *tbl;
+
+	if (!(*l_tbl) || (*l_tbl)->pd < new_tbl->pd) {
+		new_tbl->next = *l_tbl;
+		if (*l_tbl) {
+			new_tbl->pd_delta =
+				new_tbl->pd - (*l_tbl)->pd;
+		}
+		*l_tbl = new_tbl;
+	} else {
+		tbl = *l_tbl;
+
+		/* Walk to the existing tbl which has a smaller pd value */
+		while (tbl->next != NULL &&
+			new_tbl->pd < tbl->next->pd) {
+			tbl = tbl->next;
+		}
+		if (tbl->next != NULL) {
+			new_tbl->pd_delta =
+				new_tbl->pd - tbl->next->pd;
+		} else {
+			/* This is last table in linked list*/
+			new_tbl->pd_delta = 0;
+		}
+		new_tbl->next = tbl->next;
+		tbl->next = new_tbl;
+		tbl->pd_delta = tbl->pd - new_tbl->pd;
+	}
+	CAM_DBG(CAM_CRM, "added pd %d tbl to link delta %d", new_tbl->pd,
+		new_tbl->pd_delta);
+}
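Editor's note: the insertion above keeps the list in descending pd order. A standalone re-implementation of just the ordering (pd_delta bookkeeping elided) shows the resulting list shape:

	#include <stdio.h>

	struct tbl { int pd; struct tbl *next; };

	/* Minimal sketch of the descending-pd insertion order only */
	static void add_tbl(struct tbl **head, struct tbl *n)
	{
		struct tbl *t;

		if (!*head || (*head)->pd < n->pd) {
			n->next = *head;
			*head = n;
			return;
		}
		for (t = *head; t->next && n->pd < t->next->pd; t = t->next)
			;
		n->next = t->next;
		t->next = n;
	}

	int main(void)
	{
		struct tbl a = { .pd = 1 }, b = { .pd = 2 }, c = { .pd = 0 };
		struct tbl *head = NULL, *t;

		add_tbl(&head, &a);
		add_tbl(&head, &b);
		add_tbl(&head, &c);
		for (t = head; t; t = t->next)
			printf("%d ", t->pd);	/* prints: 2 1 0 */
		printf("\n");
		return 0;
	}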
+
+/**
+ * __cam_req_mgr_create_pd_tbl()
+ *
+ * @brief    : Creates new request table for new delay value
+ * @delay    : New pd table allocated will have this delay value
+ *
+ * @return   : pointer to newly allocated table, NULL for failure
+ *
+ */
+static struct cam_req_mgr_req_tbl *__cam_req_mgr_create_pd_tbl(int32_t delay)
+{
+	struct cam_req_mgr_req_tbl *tbl =
+		kzalloc(sizeof(struct cam_req_mgr_req_tbl), GFP_KERNEL);
+	if (tbl != NULL) {
+		tbl->num_slots = MAX_REQ_SLOTS;
+		CAM_DBG(CAM_CRM, "pd= %d slots= %d", delay, tbl->num_slots);
+	}
+
+	return tbl;
+}
+
+/**
+ * __cam_req_mgr_destroy_all_tbl()
+ *
+ * @brief   : Destroys all pipeline delay based req table structs
+ * @l_tbl   : pointer to the first table in the list, which has the max pd
+ *
+ */
+static void __cam_req_mgr_destroy_all_tbl(struct cam_req_mgr_req_tbl **l_tbl)
+{
+	struct cam_req_mgr_req_tbl  *tbl = *l_tbl, *temp;
+
+	CAM_DBG(CAM_CRM, "*l_tbl %pK", tbl);
+	while (tbl != NULL) {
+		temp = tbl->next;
+		kfree(tbl);
+		tbl = temp;
+	}
+	*l_tbl = NULL;
+}
+
+/**
+ * __cam_req_mgr_setup_in_q()
+ *
+ * @brief : Initialize input queue data
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int  __cam_req_mgr_setup_in_q(struct cam_req_mgr_req_data *req)
+{
+	int                           i;
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	in_q->num_slots = MAX_REQ_SLOTS;
+
+	for (i = 0; i < in_q->num_slots; i++) {
+		in_q->slot[i].idx = i;
+		in_q->slot[i].req_id = -1;
+		in_q->slot[i].skip_idx = 0;
+		in_q->slot[i].status = CRM_SLOT_STATUS_NO_REQ;
+	}
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_reset_in_q()
+ *
+ * @brief : Reset input queue data
+ * @req   : request data pointer
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_reset_in_q(struct cam_req_mgr_req_data *req)
+{
+	struct cam_req_mgr_req_queue *in_q = req->in_q;
+
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "NULL in_q");
+		return -EINVAL;
+	}
+
+	mutex_lock(&req->lock);
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * in_q->num_slots);
+	in_q->num_slots = 0;
+
+	in_q->wr_idx = 0;
+	in_q->rd_idx = 0;
+	mutex_unlock(&req->lock);
+
+	return 0;
+}
+
+/**
+ * __cam_req_mgr_notify_sof_freeze()
+ *
+ * @brief : Notify devices on link on detecting a SOF freeze
+ * @link  : link on which the sof freeze was detected
+ *
+ */
+static void __cam_req_mgr_notify_sof_freeze(
+	struct cam_req_mgr_core_link *link)
+{
+	int                                  i = 0;
+	struct cam_req_mgr_link_evt_data     evt_data;
+	struct cam_req_mgr_connected_device *dev = NULL;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_SOF_FREEZE;
+		evt_data.dev_hdl = dev->dev_hdl;
+		evt_data.link_hdl =  link->link_hdl;
+		evt_data.req_id = 0;
+		evt_data.u.error = CRM_KMD_ERR_FATAL;
+		if (dev->ops && dev->ops->process_evt)
+			dev->ops->process_evt(&evt_data);
+	}
+}
+
+/**
+ * __cam_req_mgr_process_sof_freeze()
+ *
+ * @brief : Apoptosis - Handles case when connected devices are not responding
+ * @priv  : link information
+ * @data  : task data
+ *
+ */
+static int __cam_req_mgr_process_sof_freeze(void *priv, void *data)
+{
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_core_session *session = NULL;
+	struct cam_req_mgr_message       msg;
+	int rc = 0;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		return -EINVAL;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	session = (struct cam_req_mgr_core_session *)link->parent;
+
+	CAM_ERR(CAM_CRM, "SOF freeze for session %d link 0x%x",
+		session->session_hdl, link->link_hdl);
+
+	__cam_req_mgr_notify_sof_freeze(link);
+	memset(&msg, 0, sizeof(msg));
+
+	msg.session_hdl = session->session_hdl;
+	msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+	msg.u.err_msg.request_id = 0;
+	msg.u.err_msg.link_hdl   = link->link_hdl;
+
+	rc = cam_req_mgr_notify_message(&msg,
+		V4L_EVENT_CAM_REQ_MGR_ERROR, V4L_EVENT_CAM_REQ_MGR_EVENT);
+
+	if (rc)
+		CAM_ERR(CAM_CRM,
+			"Error notifying SOF freeze for session %d link 0x%x rc %d",
+			session->session_hdl, link->link_hdl, rc);
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_sof_freeze()
+ *
+ * @brief : Callback function for timer timeout indicating SOF freeze
+ * @data  : timer pointer
+ *
+ */
+static void __cam_req_mgr_sof_freeze(struct timer_list *timer_data)
+{
+	struct cam_req_mgr_timer     *timer =
+		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
+	struct crm_workq_task               *task = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct crm_task_payload             *task_data;
+
+	if (!timer) {
+		CAM_ERR(CAM_CRM, "NULL timer");
+		return;
+	}
+
+	link = (struct cam_req_mgr_core_link *)timer->parent;
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR(CAM_CRM, "No empty task");
+		return;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_FREEZE;
+	task->process_cb = &__cam_req_mgr_process_sof_freeze;
+	cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+}
+
+/**
+ * __cam_req_mgr_create_subdevs()
+ *
+ * @brief   : Create new crm subdevs to link with realtime devices
+ * @l_dev   : list of subdevs internal to crm
+ * @num_dev : num of subdevs to be created for link
+ *
+ * @return  : 0 for success, negative for allocation failure
+ */
+static int __cam_req_mgr_create_subdevs(
+	struct cam_req_mgr_connected_device **l_dev, int32_t num_dev)
+{
+	int rc = 0;
+	*l_dev = kcalloc(num_dev,
+		sizeof(struct cam_req_mgr_connected_device), GFP_KERNEL);
+	if (!*l_dev)
+		rc = -ENOMEM;
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_destroy_subdev()
+ *
+ * @brief    : Cleans up the subdevs allocated by crm for link
+ * @l_device : pointer to list of subdevs crm created
+ *
+ */
+static void __cam_req_mgr_destroy_subdev(
+	struct cam_req_mgr_connected_device *l_device)
+{
+	kfree(l_device);
+	l_device = NULL;
+}
+
+/**
+ * __cam_req_mgr_disconnect_link()
+ *
+ * @brief    : Unlinks all devices on the link
+ * @link     : pointer to link
+ *
+ * @return   : 0 for success, negative if unlink failed for any device
+ */
+static int __cam_req_mgr_disconnect_link(struct cam_req_mgr_core_link *link)
+{
+	int32_t                                 i = 0;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+	int                                     rc = 0;
+
+	link_data.link_enable = 0;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = NULL;
+	link_data.subscribe_event = 0;
+
+	/* Using device ops unlink devices */
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+		if (dev == NULL)
+			continue;
+
+		link_data.dev_hdl = dev->dev_hdl;
+		if (dev->ops && dev->ops->link_setup) {
+			rc = dev->ops->link_setup(&link_data);
+			if (rc)
+				CAM_ERR(CAM_CRM,
+					"Unlink failed dev name %s hdl %x",
+					dev->dev_info.name,
+					dev->dev_hdl);
+		}
+		dev->dev_hdl = 0;
+		dev->parent = NULL;
+		dev->ops = NULL;
+	}
+
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_destroy_link_info()
+ *
+ * @brief    : Cleans up the mem allocated while linking
+ * @link     : pointer to link, mem associated with this link is freed
+ */
+static void __cam_req_mgr_destroy_link_info(struct cam_req_mgr_core_link *link)
+{
+	__cam_req_mgr_destroy_all_tbl(&link->req.l_tbl);
+	__cam_req_mgr_reset_in_q(&link->req);
+	link->req.num_tbl = 0;
+	mutex_destroy(&link->req.lock);
+
+	link->pd_mask = 0;
+	link->num_devs = 0;
+	link->max_delay = 0;
+}
+
+/**
+ * __cam_req_mgr_reserve_link()
+ *
+ * @brief: Reserves one link data struct within session
+ * @session: session identifier
+ *
+ * @return: pointer to link reserved
+ *
+ */
+static struct cam_req_mgr_core_link *__cam_req_mgr_reserve_link(
+	struct cam_req_mgr_core_session *session)
+{
+	struct cam_req_mgr_core_link *link;
+	struct cam_req_mgr_req_queue *in_q;
+	int i;
+
+	if (!session || !g_crm_core_dev) {
+		CAM_ERR(CAM_CRM, "NULL session/core_dev ptr");
+		return NULL;
+	}
+
+	if (session->num_links >= MAXIMUM_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Reached max links %d per session limit %d",
+			session->num_links, MAXIMUM_LINKS_PER_SESSION);
+		return NULL;
+	}
+
+	link = kzalloc(sizeof(struct cam_req_mgr_core_link),
+		GFP_KERNEL);
+	if (!link) {
+		CAM_ERR(CAM_CRM, "failed to create link, no mem");
+		return NULL;
+	}
+	in_q = kzalloc(sizeof(struct cam_req_mgr_req_queue),
+		GFP_KERNEL);
+	if (!in_q) {
+		CAM_ERR(CAM_CRM, "failed to create input queue, no mem");
+		kfree(link);
+		return NULL;
+	}
+	mutex_init(&link->lock);
+	spin_lock_init(&link->link_state_spin_lock);
+
+	mutex_lock(&link->lock);
+	link->state = CAM_CRM_LINK_STATE_AVAILABLE;
+	link->num_devs = 0;
+	link->max_delay = 0;
+	memset(in_q->slot, 0,
+		sizeof(struct cam_req_mgr_slot) * MAX_REQ_SLOTS);
+	link->req.in_q = in_q;
+	in_q->num_slots = 0;
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	link->parent = (void *)session;
+	link->sync_link = NULL;
+	mutex_unlock(&link->lock);
+
+	mutex_lock(&session->lock);
+	/*  Loop through and find a free index */
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+		if (!session->links[i]) {
+			CAM_DBG(CAM_CRM,
+				"Free link index %d found, num_links=%d",
+				i, session->num_links);
+			session->links[i] = link;
+			break;
+		}
+	}
+
+	if (i == MAXIMUM_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Free link index not found");
+		goto error;
+	}
+
+	session->num_links++;
+	CAM_DBG(CAM_CRM, "Active session links (%d)",
+		session->num_links);
+	mutex_unlock(&session->lock);
+
+	return link;
+error:
+	mutex_unlock(&session->lock);
+	kfree(link);
+	kfree(in_q);
+	return NULL;
+}
+
+/*
+ * __cam_req_mgr_free_link()
+ *
+ * @brief: Frees the link and its request queue
+ *
+ * @link: link identifier
+ *
+ */
+static void __cam_req_mgr_free_link(struct cam_req_mgr_core_link *link)
+{
+	kfree(link->req.in_q);
+	link->req.in_q = NULL;
+	kfree(link);
+}
+
+/**
+ * __cam_req_mgr_unreserve_link()
+ *
+ * @brief  : Removes the link data struct from the session and frees it
+ * @session: session identifier
+ * @link   : link identifier
+ *
+ */
+static void __cam_req_mgr_unreserve_link(
+	struct cam_req_mgr_core_session *session,
+	struct cam_req_mgr_core_link *link)
+{
+	int i;
+
+	if (!session || !link) {
+		CAM_ERR(CAM_CRM, "NULL session/link ptr %pK %pK",
+			session, link);
+		return;
+	}
+
+	mutex_lock(&session->lock);
+	if (!session->num_links) {
+		CAM_WARN(CAM_CRM, "No active link or invalid state: hdl %x",
+			link->link_hdl);
+		mutex_unlock(&session->lock);
+		return;
+	}
+
+	for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+		if (session->links[i] == link)
+			session->links[i] = NULL;
+	}
+
+	if ((session->sync_mode != CAM_REQ_MGR_SYNC_MODE_NO_SYNC) &&
+		(link->sync_link)) {
+		/*
+		 * make sure to unlink sync setup under the assumption
+		 * of only having 2 links in a given session
+		 */
+		session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+			if (session->links[i])
+				session->links[i]->sync_link = NULL;
+		}
+	}
+
+	session->num_links--;
+	CAM_DBG(CAM_CRM, "Active session links (%d)", session->num_links);
+	mutex_unlock(&session->lock);
+	__cam_req_mgr_free_link(link);
+}
+
+/* Workqueue context processing section */
+
+/**
+ * cam_req_mgr_process_send_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to send
+ *         apply request id to drivers.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_send_req(void *priv, void *data)
+{
+	int                                 rc = 0;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_send_request     *send_req = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	send_req = (struct cam_req_mgr_send_request *)data;
+	in_q = send_req->in_q;
+
+	rc = __cam_req_mgr_send_req(link, in_q, CAM_TRIGGER_POINT_SOF);
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_flush_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to check
+ *         which requests need to be removed/cancelled.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_flush_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx = -1;
+	struct cam_req_mgr_flush_info       *flush_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_flush_request     flush_req;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	flush_info  = (struct cam_req_mgr_flush_info *)&task_data->u;
+	CAM_DBG(CAM_REQ, "link_hdl %x req_id %lld type %d",
+		flush_info->link_hdl,
+		flush_info->req_id,
+		flush_info->flush_type);
+
+	in_q = link->req.in_q;
+
+	trace_cam_flush_req(link, flush_info);
+
+	mutex_lock(&link->req.lock);
+	if (flush_info->flush_type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		link->last_flush_id = flush_info->req_id;
+		CAM_INFO(CAM_CRM, "Last request id to flush is %lld",
+			flush_info->req_id);
+		for (i = 0; i < in_q->num_slots; i++) {
+			slot = &in_q->slot[i];
+			slot->req_id = -1;
+			slot->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+			slot->skip_idx = 1;
+			slot->status = CRM_SLOT_STATUS_NO_REQ;
+		}
+		in_q->wr_idx = 0;
+		in_q->rd_idx = 0;
+	} else if (flush_info->flush_type ==
+		CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, flush_info->req_id);
+		if (idx < 0) {
+			CAM_ERR(CAM_CRM, "req_id %lld not found in input queue",
+			flush_info->req_id);
+		} else {
+			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
+				flush_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (slot->status == CRM_SLOT_STATUS_REQ_PENDING ||
+				slot->status == CRM_SLOT_STATUS_REQ_APPLIED) {
+				CAM_WARN(CAM_CRM,
+					"req_id %lld can not be cancelled",
+					flush_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			__cam_req_mgr_in_q_skip_idx(in_q, idx);
+		}
+	}
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		flush_req.link_hdl = flush_info->link_hdl;
+		flush_req.dev_hdl = device->dev_hdl;
+		flush_req.req_id = flush_info->req_id;
+		flush_req.type = flush_info->flush_type;
+		/* @TODO: error return handling from drivers */
+		if (device->ops && device->ops->flush_req)
+			rc = device->ops->flush_req(&flush_req);
+	}
+	complete(&link->workq_comp);
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_sched_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_sched_req(void *priv, void *data)
+{
+	int                               rc = 0;
+	struct cam_req_mgr_sched_request *sched_req = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_req_queue     *in_q = NULL;
+	struct cam_req_mgr_slot          *slot = NULL;
+	struct crm_task_payload          *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	sched_req  = (struct cam_req_mgr_sched_request *)&task_data->u;
+	in_q = link->req.in_q;
+
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld at slot %d sync_mode %d",
+		sched_req->link_hdl, sched_req->req_id,
+		in_q->wr_idx, sched_req->sync_mode);
+
+	mutex_lock(&link->req.lock);
+	slot = &in_q->slot[in_q->wr_idx];
+
+	if (slot->status != CRM_SLOT_STATUS_NO_REQ &&
+		slot->status != CRM_SLOT_STATUS_REQ_APPLIED)
+		CAM_WARN(CAM_CRM, "in_q overwrite %d", slot->status);
+
+	slot->status = CRM_SLOT_STATUS_REQ_ADDED;
+	slot->req_id = sched_req->req_id;
+	slot->sync_mode = sched_req->sync_mode;
+	slot->skip_idx = 0;
+	slot->recover = sched_req->bubble_enable;
+	link->open_req_cnt++;
+	__cam_req_mgr_inc_idx(&in_q->wr_idx, 1, in_q->num_slots);
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_add_req()
+ *
+ * @brief: This runs in workqueue thread context. Calls core funcs to check
+ *         which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_add_req(void *priv, void *data)
+{
+	int                                  rc = 0, i = 0, idx;
+	struct cam_req_mgr_add_request      *add_req = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_req_tbl          *tbl = NULL;
+	struct cam_req_mgr_tbl_slot         *slot = NULL;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	add_req = (struct cam_req_mgr_add_request *)&task_data->u;
+
+	for (i = 0; i < link->num_devs; i++) {
+		device = &link->l_dev[i];
+		if (device->dev_hdl == add_req->dev_hdl) {
+			tbl = device->pd_tbl;
+			break;
+		}
+	}
+	if (!tbl) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "dev_hdl not found %x, %x %x",
+			add_req->dev_hdl,
+			link->l_dev[0].dev_hdl,
+			link->l_dev[1].dev_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	/*
+	 * Go through request table and add
+	 * request id to proper table
+	 * 1. find req slot in in_q matching req_id sent by dev
+	 * 2. goto table of this device based on p_delay
+	 * 3. mark req_ready_map with this dev_bit.
+	 */
+
+	mutex_lock(&link->req.lock);
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
+		rc = -EBADSLT;
+		mutex_unlock(&link->req.lock);
+		goto end;
+	}
+
+	slot = &tbl->slot[idx];
+	slot->is_applied = false;
+	if ((add_req->skip_before_applying & 0xFF) > slot->inject_delay) {
+		slot->inject_delay = (add_req->skip_before_applying & 0xFF);
+		slot->dev_hdl = add_req->dev_hdl;
+		if (add_req->skip_before_applying & SKIP_NEXT_FRAME)
+			slot->skip_next_frame = true;
+		CAM_DBG(CAM_CRM, "Req_id %llu injecting delay %llu",
+			add_req->req_id,
+			(add_req->skip_before_applying & 0xFF));
+	}
+
+	if (slot->state != CRM_REQ_STATE_PENDING &&
+		slot->state != CRM_REQ_STATE_EMPTY) {
+		CAM_WARN(CAM_CRM, "Unexpected state %d for slot %d map %x",
+			slot->state, idx, slot->req_ready_map);
+	}
+
+	slot->state = CRM_REQ_STATE_PENDING;
+	slot->req_ready_map |= (1 << device->dev_bit);
+
+	CAM_DBG(CAM_CRM, "idx %d dev_hdl %x req_id %lld pd %d ready_map %x",
+		idx, add_req->dev_hdl, add_req->req_id, tbl->pd,
+		slot->req_ready_map);
+
+	trace_cam_req_mgr_add_req(link, idx, add_req, tbl, device);
+
+	if (slot->req_ready_map == tbl->dev_mask) {
+		CAM_DBG(CAM_REQ, "idx %d req_id %lld pd %d SLOT READY",
+			idx, add_req->req_id, tbl->pd);
+		slot->state = CRM_REQ_STATE_READY;
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_error()
+ *
+ * @brief: This runs in workqueue thread context. Performs bubble/error
+ *         recovery.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+int cam_req_mgr_process_error(void *priv, void *data)
+{
+	int                                  rc = 0, idx = -1, i;
+	struct cam_req_mgr_error_notify     *err_info = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct cam_req_mgr_slot             *slot = NULL;
+	struct cam_req_mgr_connected_device *device = NULL;
+	struct cam_req_mgr_link_evt_data     evt_data;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	err_info  = (struct cam_req_mgr_error_notify *)&task_data->u;
+	CAM_DBG(CAM_CRM, "link_hdl %x req_id %lld error %d",
+		err_info->link_hdl,
+		err_info->req_id,
+		err_info->error);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	if (err_info->error == CRM_KMD_ERR_BUBBLE) {
+		idx = __cam_req_mgr_find_slot_for_req(in_q, err_info->req_id);
+		if (idx < 0) {
+			CAM_ERR_RATE_LIMIT(CAM_CRM,
+				"req_id %lld not found in input queue",
+				err_info->req_id);
+		} else {
+			CAM_DBG(CAM_CRM, "req_id %lld found at idx %d",
+				err_info->req_id, idx);
+			slot = &in_q->slot[idx];
+			if (!slot->recover) {
+				CAM_WARN(CAM_CRM,
+					"err recovery disabled req_id %lld",
+					err_info->req_id);
+				mutex_unlock(&link->req.lock);
+				return 0;
+			} else if (slot->status != CRM_SLOT_STATUS_REQ_PENDING
+			&& slot->status != CRM_SLOT_STATUS_REQ_APPLIED) {
+				CAM_WARN(CAM_CRM,
+					"req_id %lld can not be recovered %d",
+					err_info->req_id, slot->status);
+				mutex_unlock(&link->req.lock);
+				return -EINVAL;
+			}
+			/* Notify all devices in the link about error */
+			for (i = 0; i < link->num_devs; i++) {
+				device = &link->l_dev[i];
+				if (device != NULL) {
+					evt_data.dev_hdl = device->dev_hdl;
+					evt_data.evt_type =
+						CAM_REQ_MGR_LINK_EVT_ERR;
+					evt_data.link_hdl =  link->link_hdl;
+					evt_data.req_id = err_info->req_id;
+					evt_data.u.error = err_info->error;
+					if (device->ops &&
+						device->ops->process_evt)
+						rc = device->ops->process_evt(
+							&evt_data);
+				}
+			}
+			/* Bring processing pointer to bubbled req id */
+			__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+			in_q->rd_idx = idx;
+			in_q->slot[idx].status = CRM_SLOT_STATUS_REQ_ADDED;
+			spin_lock_bh(&link->link_state_spin_lock);
+			link->state = CAM_CRM_LINK_STATE_ERR;
+			spin_unlock_bh(&link->link_state_spin_lock);
+		}
+	}
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_process_trigger()
+ *
+ * @brief: This runs in workqueue thread context. Calls core functions to
+ *         check which pending requests can be processed.
+ * @priv : link information.
+ * @data : contains information about frame_id, link etc.
+ *
+ * @return: 0 on success.
+ */
+static int cam_req_mgr_process_trigger(void *priv, void *data)
+{
+	int                                  rc = 0;
+	struct cam_req_mgr_trigger_notify   *trigger_data = NULL;
+	struct cam_req_mgr_core_link        *link = NULL;
+	struct cam_req_mgr_req_queue        *in_q = NULL;
+	struct crm_task_payload             *task_data = NULL;
+
+	if (!data || !priv) {
+		CAM_ERR(CAM_CRM, "input args NULL %pK %pK", data, priv);
+		rc = -EINVAL;
+		goto end;
+	}
+	link = (struct cam_req_mgr_core_link *)priv;
+	task_data = (struct crm_task_payload *)data;
+	trigger_data = (struct cam_req_mgr_trigger_notify *)&task_data->u;
+
+	CAM_DBG(CAM_REQ, "link_hdl %x frame_id %lld, trigger %x\n",
+		trigger_data->link_hdl,
+		trigger_data->frame_id,
+		trigger_data->trigger);
+
+	in_q = link->req.in_q;
+
+	mutex_lock(&link->req.lock);
+	/*
+	 * Check if the current read index is in applied state; if yes, free
+	 * it and increment the read index to the next slot.
+	 */
+	CAM_DBG(CAM_CRM, "link_hdl %x current idx %d req_status %d",
+		link->link_hdl, in_q->rd_idx, in_q->slot[in_q->rd_idx].status);
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state == CAM_CRM_LINK_STATE_ERR)
+		CAM_WARN(CAM_CRM, "Error recovery idx %d status %d",
+			in_q->rd_idx,
+			in_q->slot[in_q->rd_idx].status);
+
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	if (in_q->slot[in_q->rd_idx].status == CRM_SLOT_STATUS_REQ_APPLIED) {
+		/*
+		 * Do NOT reset the req queue slot data here; it must be
+		 * preserved so that bubble recovery can be handled.
+		 *
+		 * Check if any new req is pending in the slot; if not, finish
+		 * the lower pipeline delay devices with the available req ids.
+		 */
+		CAM_DBG(CAM_CRM, "link[%x] Req[%lld] invalidating slot",
+			link->link_hdl, in_q->slot[in_q->rd_idx].req_id);
+		__cam_req_mgr_check_next_req_slot(in_q);
+		__cam_req_mgr_inc_idx(&in_q->rd_idx, 1, in_q->num_slots);
+	}
+	rc = __cam_req_mgr_process_req(link, trigger_data->trigger);
+	mutex_unlock(&link->req.lock);
+
+end:
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_dev_handle_to_name()
+ *
+ * @brief    : Finds device name based on the device handle
+ * @dev_hdl  : Device handle whose name is to be found
+ * @link     : Link on which the device is connected
+ * @return   : String containing the device name
+ *
+ */
+static const char *__cam_req_mgr_dev_handle_to_name(
+	int32_t dev_hdl, struct cam_req_mgr_core_link *link)
+{
+	struct cam_req_mgr_connected_device *dev = NULL;
+	int i = 0;
+
+	for (i = 0; i < link->num_devs; i++) {
+		dev = &link->l_dev[i];
+
+		if (dev_hdl == dev->dev_hdl)
+			return dev->dev_info.name;
+	}
+
+	return "Invalid dev_hdl";
+}
+
+/* Linked devices' Callback section */
+
+/**
+ * cam_req_mgr_cb_add_req()
+ *
+ * @brief    : Drivers call this function to notify that a new packet is
+ *             available.
+ * @add_req  : Information about new request available at a device.
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_add_req(struct cam_req_mgr_add_request *add_req)
+{
+	int                             rc = 0, idx;
+	struct crm_workq_task          *task = NULL;
+	struct cam_req_mgr_core_link   *link = NULL;
+	struct cam_req_mgr_add_request *dev_req;
+	struct crm_task_payload        *task_data;
+
+	if (!add_req) {
+		CAM_ERR(CAM_CRM, "add_req is NULL");
+		return -EINVAL;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(add_req->link_hdl);
+
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", add_req->link_hdl);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_REQ, "dev name %s dev_hdl %d dev req %lld",
+		__cam_req_mgr_dev_handle_to_name(add_req->dev_hdl, link),
+		add_req->dev_hdl, add_req->req_id);
+
+	mutex_lock(&link->lock);
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state < CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		rc = -EPERM;
+		spin_unlock_bh(&link->link_state_spin_lock);
+		goto end;
+	}
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	/* Validate if req id is present in input queue */
+	idx = __cam_req_mgr_find_slot_for_req(link->req.in_q, add_req->req_id);
+	if (idx < 0) {
+		CAM_ERR(CAM_CRM, "req %lld not found in in_q", add_req->req_id);
+		rc = -ENOENT;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "no empty task dev %x req %lld",
+			add_req->dev_hdl, add_req->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
+	dev_req = (struct cam_req_mgr_add_request *)&task_data->u;
+	dev_req->req_id = add_req->req_id;
+	dev_req->link_hdl = add_req->link_hdl;
+	dev_req->dev_hdl = add_req->dev_hdl;
+	dev_req->skip_before_applying = add_req->skip_before_applying;
+	task->process_cb = &cam_req_mgr_process_add_req;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+	CAM_DBG(CAM_CRM, "X: dev %x dev req %lld",
+		add_req->dev_hdl, add_req->req_id);
+
+end:
+	mutex_unlock(&link->lock);
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_err()
+ *
+ * @brief    : Handles an error reported by a device and initiates bubble
+ *             recovery
+ * @err_info : contains information about the error, e.g. bubble/overflow
+ *
+ * @return   : 0 on success, negative in case of failure
+ *
+ */
+static int cam_req_mgr_cb_notify_err(
+	struct cam_req_mgr_error_notify *err_info)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_error_notify *notify_err;
+	struct crm_task_payload         *task_data;
+
+	if (!err_info) {
+		CAM_ERR(CAM_CRM, "err_info is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(err_info->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", err_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state != CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		spin_unlock_bh(&link->link_state_spin_lock);
+		rc = -EPERM;
+		goto end;
+	}
+	crm_timer_reset(link->watchdog);
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR(CAM_CRM, "no empty task req_id %lld", err_info->req_id);
+		rc = -EBUSY;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_ERR;
+	notify_err = (struct cam_req_mgr_error_notify *)&task_data->u;
+	notify_err->req_id = err_info->req_id;
+	notify_err->link_hdl = err_info->link_hdl;
+	notify_err->dev_hdl = err_info->dev_hdl;
+	notify_err->error = err_info->error;
+	task->process_cb = &cam_req_mgr_process_error;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+/**
+ * cam_req_mgr_cb_notify_trigger()
+ *
+ * @brief   : SOF received from a device; sends the trigger through the
+ *            workqueue
+ * @sof_data: contains information about frame_id, link etc.
+ *
+ * @return  : 0 on success
+ *
+ */
+static int cam_req_mgr_cb_notify_trigger(
+	struct cam_req_mgr_trigger_notify *trigger_data)
+{
+	int                              rc = 0;
+	struct crm_workq_task           *task = NULL;
+	struct cam_req_mgr_core_link    *link = NULL;
+	struct cam_req_mgr_trigger_notify   *notify_trigger;
+	struct crm_task_payload         *task_data;
+
+	if (!trigger_data) {
+		CAM_ERR(CAM_CRM, "trigger_data is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(trigger_data->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", trigger_data->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	if (link->state < CAM_CRM_LINK_STATE_READY) {
+		CAM_WARN(CAM_CRM, "invalid link state:%d", link->state);
+		spin_unlock_bh(&link->link_state_spin_lock);
+		rc = -EPERM;
+		goto end;
+	}
+	crm_timer_reset(link->watchdog);
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		CAM_ERR(CAM_CRM, "no empty task frame %lld",
+			trigger_data->frame_id);
+		rc = -EBUSY;
+		goto end;
+	}
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_NOTIFY_SOF;
+	notify_trigger = (struct cam_req_mgr_trigger_notify *)&task_data->u;
+	notify_trigger->frame_id = trigger_data->frame_id;
+	notify_trigger->link_hdl = trigger_data->link_hdl;
+	notify_trigger->dev_hdl = trigger_data->dev_hdl;
+	notify_trigger->trigger = trigger_data->trigger;
+	task->process_cb = &cam_req_mgr_process_trigger;
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+end:
+	return rc;
+}
+
+static struct cam_req_mgr_crm_cb cam_req_mgr_ops = {
+	.notify_trigger = cam_req_mgr_cb_notify_trigger,
+	.notify_err     = cam_req_mgr_cb_notify_err,
+	.add_req        = cam_req_mgr_cb_add_req,
+};
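+
+/*
+ * Illustrative sketch (not part of the driver): a connected device driver
+ * receives this ops table as link_data.crm_cb in its link_setup() callback
+ * and could notify CRM of a new request roughly as below; the local
+ * variable names here are assumptions for the example:
+ *
+ *	struct cam_req_mgr_add_request add_req = {
+ *		.link_hdl = link_hdl,
+ *		.dev_hdl  = dev_hdl,
+ *		.req_id   = req_id,
+ *	};
+ *	crm_cb->add_req(&add_req);
+ */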
+
+/**
+ * __cam_req_mgr_setup_link_info()
+ *
+ * @brief     : Sets up the input queue, creates pd based tables, and
+ *              establishes communication with the devs connected on this link.
+ * @link      : pointer to link to setup
+ * @link_info : link_info coming from CSL to prepare link
+ *
+ * @return    : 0 on success, negative in case of failure
+ *
+ */
+static int __cam_req_mgr_setup_link_info(struct cam_req_mgr_core_link *link,
+	struct cam_req_mgr_link_info *link_info)
+{
+	int                                     rc = 0, i = 0;
+	struct cam_req_mgr_core_dev_link_setup  link_data;
+	struct cam_req_mgr_connected_device    *dev;
+	struct cam_req_mgr_req_tbl             *pd_tbl;
+	enum cam_pipeline_delay                 max_delay;
+	uint32_t                                subscribe_event = 0;
+
+	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES)
+		return -EPERM;
+
+	mutex_init(&link->req.lock);
+	CAM_DBG(CAM_CRM, "LOCK_DBG in_q lock %pK", &link->req.lock);
+	link->req.num_tbl = 0;
+
+	rc = __cam_req_mgr_setup_in_q(&link->req);
+	if (rc < 0)
+		return rc;
+
+	max_delay = CAM_PIPELINE_DELAY_0;
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+		/* Using dev hdl, get ops ptr to communicate with device */
+		dev->ops = (struct cam_req_mgr_kmd_ops *)
+			cam_get_device_ops(link_info->dev_hdls[i]);
+		if (!dev->ops ||
+			!dev->ops->get_dev_info ||
+			!dev->ops->link_setup) {
+			CAM_ERR(CAM_CRM, "FATAL: device ops NULL");
+			rc = -ENXIO;
+			goto error;
+		}
+		dev->dev_hdl = link_info->dev_hdls[i];
+		dev->parent = (void *)link;
+		dev->dev_info.dev_hdl = dev->dev_hdl;
+		rc = dev->ops->get_dev_info(&dev->dev_info);
+
+		trace_cam_req_mgr_connect_device(link, &dev->dev_info);
+
+		CAM_DBG(CAM_CRM,
+			"%x: connected: %s, id %d, delay %d, trigger %x",
+			link_info->session_hdl, dev->dev_info.name,
+			dev->dev_info.dev_id, dev->dev_info.p_delay,
+			dev->dev_info.trigger);
+		if (rc < 0 ||
+			dev->dev_info.p_delay >=
+			CAM_PIPELINE_DELAY_MAX ||
+			dev->dev_info.p_delay <
+			CAM_PIPELINE_DELAY_0) {
+			CAM_ERR(CAM_CRM, "get device info failed");
+			goto error;
+		} else {
+			CAM_DBG(CAM_CRM, "%x: connected: %s, delay %d",
+				link_info->session_hdl,
+				dev->dev_info.name,
+				dev->dev_info.p_delay);
+			if (dev->dev_info.p_delay > max_delay)
+				max_delay = dev->dev_info.p_delay;
+
+			subscribe_event |= (uint32_t)dev->dev_info.trigger;
+		}
+	}
+
+	link->subscribe_event = subscribe_event;
+	link_data.link_enable = 1;
+	link_data.link_hdl = link->link_hdl;
+	link_data.crm_cb = &cam_req_mgr_ops;
+	link_data.max_delay = max_delay;
+	link_data.subscribe_event = subscribe_event;
+
+	for (i = 0; i < link_info->num_devices; i++) {
+		dev = &link->l_dev[i];
+
+		link_data.dev_hdl = dev->dev_hdl;
+		/*
+		 * For unique pipeline delay table create request
+		 * tracking table
+		 */
+		if (link->pd_mask & (1 << dev->dev_info.p_delay)) {
+			pd_tbl = __cam_req_mgr_find_pd_tbl(link->req.l_tbl,
+				dev->dev_info.p_delay);
+			if (!pd_tbl) {
+				CAM_ERR(CAM_CRM, "pd %d tbl not found",
+					dev->dev_info.p_delay);
+				rc = -ENXIO;
+				goto error;
+			}
+		} else {
+			pd_tbl = __cam_req_mgr_create_pd_tbl(
+				dev->dev_info.p_delay);
+			if (pd_tbl == NULL) {
+				CAM_ERR(CAM_CRM, "create new pd tbl failed");
+				rc = -ENXIO;
+				goto error;
+			}
+			pd_tbl->pd = dev->dev_info.p_delay;
+			link->pd_mask |= (1 << pd_tbl->pd);
+			/*
+			 * Add table to list and also sort list
+			 * from max pd to lowest
+			 */
+			__cam_req_mgr_add_tbl_to_link(&link->req.l_tbl, pd_tbl);
+		}
+		dev->dev_bit = pd_tbl->dev_count++;
+		dev->pd_tbl = pd_tbl;
+		pd_tbl->dev_mask |= (1 << dev->dev_bit);
+
+		/* Communicate with dev to establish the link */
+		dev->ops->link_setup(&link_data);
+
+		if (link->max_delay < dev->dev_info.p_delay)
+			link->max_delay = dev->dev_info.p_delay;
+	}
+	link->num_devs = link_info->num_devices;
+
+	/* Assign id for pd tables */
+	__cam_req_mgr_tbl_set_id(link->req.l_tbl, &link->req);
+
+	/* At start, expect max pd devices, all are in skip state */
+	__cam_req_mgr_tbl_set_all_skip_cnt(&link->req.l_tbl);
+
+	return 0;
+
+error:
+	__cam_req_mgr_destroy_link_info(link);
+	return rc;
+}
+
+/* IOCTLs handling section */
+int cam_req_mgr_create_session(
+	struct cam_req_mgr_session_info *ses_info)
+{
+	int                              rc = 0;
+	int32_t                          session_hdl;
+	struct cam_req_mgr_core_session *cam_session = NULL;
+
+	if (!ses_info) {
+		CAM_DBG(CAM_CRM, "NULL session info pointer");
+		return -EINVAL;
+	}
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	cam_session = kzalloc(sizeof(*cam_session),
+		GFP_KERNEL);
+	if (!cam_session) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	session_hdl = cam_create_session_hdl((void *)cam_session);
+	if (session_hdl < 0) {
+		CAM_ERR(CAM_CRM, "unable to create session_hdl = %x",
+			session_hdl);
+		rc = session_hdl;
+		kfree(cam_session);
+		goto end;
+	}
+	ses_info->session_hdl = session_hdl;
+
+	mutex_init(&cam_session->lock);
+	CAM_DBG(CAM_CRM, "LOCK_DBG session lock %pK", &cam_session->lock);
+
+	mutex_lock(&cam_session->lock);
+	cam_session->session_hdl = session_hdl;
+	cam_session->num_links = 0;
+	cam_session->sync_mode = CAM_REQ_MGR_SYNC_MODE_NO_SYNC;
+	list_add(&cam_session->entry, &g_crm_core_dev->session_head);
+	mutex_unlock(&cam_session->lock);
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+/**
+ * __cam_req_mgr_unlink()
+ *
+ * @brief : Unlink devices on a link structure from the session
+ * @link  : Pointer to the link structure
+ *
+ * @return: 0 for success, negative for failure
+ *
+ */
+static int __cam_req_mgr_unlink(struct cam_req_mgr_core_link *link)
+{
+	int rc;
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_IDLE;
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	rc = __cam_req_mgr_disconnect_link(link);
+	if (rc)
+		CAM_ERR(CAM_CRM,
+			"Unlink for all devices was not successful");
+
+	mutex_lock(&link->lock);
+	/* Destroy timer of link */
+	crm_timer_exit(&link->watchdog);
+
+	/* Destroy workq of link */
+	cam_req_mgr_workq_destroy(&link->workq);
+
+	/* Cleanup request tables and unlink devices */
+	__cam_req_mgr_destroy_link_info(link);
+
+	/* Free memory holding data of linked devs */
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+
+	/* Destroy the link handle */
+	rc = cam_destroy_device_hdl(link->link_hdl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM, "error destroying link hdl %x rc %d",
+			link->link_hdl, rc);
+	} else
+		link->link_hdl = -1;
+
+	mutex_unlock(&link->lock);
+	return rc;
+}
+
+int cam_req_mgr_destroy_session(
+		struct cam_req_mgr_session_info *ses_info)
+{
+	int rc;
+	int i;
+	struct cam_req_mgr_core_session *cam_session = NULL;
+	struct cam_req_mgr_core_link *link;
+
+	if (!ses_info) {
+		CAM_DBG(CAM_CRM, "NULL session info pointer");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(ses_info->session_hdl);
+	if (!cam_session) {
+		CAM_ERR(CAM_CRM, "failed to get session priv");
+		rc = -ENOENT;
+		goto end;
+
+	}
+	if (cam_session->num_links) {
+		CAM_DBG(CAM_CRM, "destroy session %x num_active_links %d",
+			ses_info->session_hdl,
+			cam_session->num_links);
+
+		for (i = 0; i < MAXIMUM_LINKS_PER_SESSION; i++) {
+			link = cam_session->links[i];
+
+			if (!link)
+				continue;
+
+			/* Ignore return value since session is going away */
+			__cam_req_mgr_unlink(link);
+			__cam_req_mgr_free_link(link);
+		}
+	}
+	list_del(&cam_session->entry);
+	mutex_destroy(&cam_session->lock);
+	kfree(cam_session);
+
+	rc = cam_destroy_session_hdl(ses_info->session_hdl);
+	if (rc < 0)
+		CAM_ERR(CAM_CRM, "unable to destroy session_hdl = %x rc %d",
+			ses_info->session_hdl, rc);
+
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info)
+{
+	int                                     rc = 0;
+	int                                     wq_flag = 0;
+	char                                    buf[128];
+	struct cam_create_dev_hdl               root_dev;
+	struct cam_req_mgr_core_session        *cam_session;
+	struct cam_req_mgr_core_link           *link;
+
+	if (!link_info) {
+		CAM_DBG(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+	if (link_info->num_devices > CAM_REQ_MGR_MAX_HANDLES) {
+		CAM_ERR(CAM_CRM, "Invalid num devices %d",
+			link_info->num_devices);
+		return -EINVAL;
+	}
+
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(link_info->session_hdl);
+	if (!cam_session) {
+		CAM_DBG(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+
+	/* Allocate link struct and map it with session's request queue */
+	link = __cam_req_mgr_reserve_link(cam_session);
+	if (!link) {
+		CAM_ERR(CAM_CRM, "failed to reserve new link");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+	CAM_DBG(CAM_CRM, "link reserved %pK %x", link, link->link_hdl);
+
+	memset(&root_dev, 0, sizeof(struct cam_create_dev_hdl));
+	root_dev.session_hdl = link_info->session_hdl;
+	root_dev.priv = (void *)link;
+
+	mutex_lock(&link->lock);
+	/* Create unique dev handle for link */
+	link->link_hdl = cam_create_device_hdl(&root_dev);
+	if (link->link_hdl < 0) {
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new device handle");
+		rc = link->link_hdl;
+		goto link_hdl_fail;
+	}
+	link_info->link_hdl = link->link_hdl;
+	link->last_flush_id = 0;
+
+	/* Allocate memory to hold data of all linked devs */
+	rc = __cam_req_mgr_create_subdevs(&link->l_dev,
+		link_info->num_devices);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM,
+			"Insufficient memory to create new crm subdevs");
+		goto create_subdev_failed;
+	}
+
+	/* Using device ops query connected devs, prepare request tables */
+	rc = __cam_req_mgr_setup_link_info(link, link_info);
+	if (rc < 0)
+		goto setup_failed;
+
+	spin_lock_bh(&link->link_state_spin_lock);
+	link->state = CAM_CRM_LINK_STATE_READY;
+	spin_unlock_bh(&link->link_state_spin_lock);
+
+	/* Create worker for current link */
+	snprintf(buf, sizeof(buf), "%x-%x",
+		link_info->session_hdl, link->link_hdl);
+	wq_flag = CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL;
+	rc = cam_req_mgr_workq_create(buf, CRM_WORKQ_NUM_TASKS,
+		&link->workq, CRM_WORKQ_USAGE_NON_IRQ, wq_flag);
+	if (rc < 0) {
+		CAM_ERR(CAM_CRM, "FATAL: unable to create worker");
+		__cam_req_mgr_destroy_link_info(link);
+		goto setup_failed;
+	}
+
+	/* Assign payload to workqueue tasks */
+	rc = __cam_req_mgr_setup_payload(link->workq);
+	if (rc < 0) {
+		__cam_req_mgr_destroy_link_info(link);
+		cam_req_mgr_workq_destroy(&link->workq);
+		goto setup_failed;
+	}
+
+	mutex_unlock(&link->lock);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+setup_failed:
+	__cam_req_mgr_destroy_subdev(link->l_dev);
+create_subdev_failed:
+	cam_destroy_device_hdl(link->link_hdl);
+	link_info->link_hdl = -1;
+link_hdl_fail:
+	mutex_unlock(&link->lock);
+	__cam_req_mgr_unreserve_link(cam_session, link);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info)
+{
+	int                              rc = 0;
+	struct cam_req_mgr_core_session *cam_session;
+	struct cam_req_mgr_core_link    *link;
+
+	if (!unlink_info) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	CAM_DBG(CAM_CRM, "link_hdl %x", unlink_info->link_hdl);
+
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(unlink_info->session_hdl);
+	if (!cam_session) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+
+	/* link hdl's priv data is core_link struct */
+	link = cam_get_device_priv(unlink_info->link_hdl);
+	if (!link) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rc = __cam_req_mgr_unlink(link);
+
+	/* Free the current link and return it to the session's free pool */
+	__cam_req_mgr_unreserve_link(cam_session, link);
+
+done:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+int cam_req_mgr_schedule_request(
+			struct cam_req_mgr_sched_request *sched_req)
+{
+	int                               rc = 0;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_core_session  *session = NULL;
+	struct cam_req_mgr_sched_request *sched;
+	struct crm_task_payload           task_data;
+
+	if (!sched_req) {
+		CAM_ERR(CAM_CRM, "sched_req is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(sched_req->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", sched_req->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	session = (struct cam_req_mgr_core_session *)link->parent;
+	if (!session) {
+		CAM_WARN(CAM_CRM, "session ptr NULL %x", sched_req->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (sched_req->req_id <= link->last_flush_id) {
+		CAM_INFO(CAM_CRM,
+			"request %lld is flushed, last_flush_id %lld",
+			sched_req->req_id, link->last_flush_id);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (sched_req->req_id > link->last_flush_id)
+		link->last_flush_id = 0;
+
+	CAM_DBG(CAM_CRM, "link 0x%x req %lld, sync_mode %d",
+		sched_req->link_hdl, sched_req->req_id, sched_req->sync_mode);
+
+	task_data.type = CRM_WORKQ_TASK_SCHED_REQ;
+	sched = (struct cam_req_mgr_sched_request *)&task_data.u;
+	sched->req_id = sched_req->req_id;
+	sched->sync_mode = sched_req->sync_mode;
+	sched->link_hdl = sched_req->link_hdl;
+	if (session->force_err_recovery == AUTO_RECOVERY) {
+		sched->bubble_enable = sched_req->bubble_enable;
+	} else {
+		sched->bubble_enable =
+		(session->force_err_recovery == FORCE_ENABLE_RECOVERY) ? 1 : 0;
+	}
+
+	rc = cam_req_mgr_process_sched_req(link, &task_data);
+
+	CAM_DBG(CAM_REQ, "Open req %lld on link 0x%x with sync_mode %d",
+		sched_req->req_id, sched_req->link_hdl, sched_req->sync_mode);
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+int cam_req_mgr_sync_config(
+	struct cam_req_mgr_sync_mode *sync_info)
+{
+	int                              rc = 0;
+	struct cam_req_mgr_core_session *cam_session;
+	struct cam_req_mgr_core_link    *link1 = NULL;
+	struct cam_req_mgr_core_link    *link2 = NULL;
+
+	if (!sync_info) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	if ((sync_info->num_links < 0) ||
+		(sync_info->num_links >
+		MAX_LINKS_PER_SESSION)) {
+		CAM_ERR(CAM_CRM, "Invalid num links %d", sync_info->num_links);
+		return -EINVAL;
+	}
+
+	if ((!sync_info->link_hdls[0]) || (!sync_info->link_hdls[1])) {
+		CAM_WARN(CAM_CRM, "Invalid link handles 0x%x 0x%x",
+			sync_info->link_hdls[0], sync_info->link_hdls[1]);
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	/* session hdl's priv data is cam session struct */
+	cam_session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(sync_info->session_hdl);
+	if (!cam_session) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		mutex_unlock(&g_crm_core_dev->crm_lock);
+		return -EINVAL;
+	}
+
+	mutex_lock(&cam_session->lock);
+
+	CAM_DBG(CAM_CRM, "link handles %x %x",
+		sync_info->link_hdls[0], sync_info->link_hdls[1]);
+
+	/* Only two links exist per session in the dual camera use case */
+	link1 = cam_get_device_priv(sync_info->link_hdls[0]);
+	if (!link1) {
+		CAM_ERR(CAM_CRM, "link1 NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	link2 = cam_get_device_priv(sync_info->link_hdls[1]);
+	if (!link2) {
+		CAM_ERR(CAM_CRM, "link2 NULL pointer");
+		rc = -EINVAL;
+		goto done;
+	}
+
+	link1->sof_counter = -1;
+	link1->sync_self_ref = -1;
+	link1->frame_skip_flag = false;
+	link1->sync_link_sof_skip = false;
+	link1->sync_link = link2;
+
+	link2->sof_counter = -1;
+	link2->sync_self_ref = -1;
+	link2->frame_skip_flag = false;
+	link2->sync_link_sof_skip = false;
+	link2->sync_link = link1;
+
+	cam_session->sync_mode = sync_info->sync_mode;
+	CAM_DBG(CAM_REQ,
+		"Sync config on link1 0x%x & link2 0x%x with sync_mode %d",
+		link1->link_hdl, link2->link_hdl, cam_session->sync_mode);
+
+done:
+	mutex_unlock(&cam_session->lock);
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+int cam_req_mgr_flush_requests(
+	struct cam_req_mgr_flush_info *flush_info)
+{
+	int                               rc = 0;
+	struct crm_workq_task            *task = NULL;
+	struct cam_req_mgr_core_link     *link = NULL;
+	struct cam_req_mgr_flush_info    *flush;
+	struct crm_task_payload          *task_data;
+	struct cam_req_mgr_core_session  *session = NULL;
+
+	if (!flush_info) {
+		CAM_ERR(CAM_CRM, "flush req is NULL");
+		rc = -EFAULT;
+		goto end;
+	}
+	if (flush_info->flush_type >= CAM_REQ_MGR_FLUSH_TYPE_MAX) {
+		CAM_ERR(CAM_CRM, "incorrect flush type %x",
+			flush_info->flush_type);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	/* session hdl's priv data is cam session struct */
+	session = (struct cam_req_mgr_core_session *)
+		cam_get_device_priv(flush_info->session_hdl);
+	if (!session) {
+		CAM_ERR(CAM_CRM, "Invalid session %x", flush_info->session_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+	if (session->num_links <= 0) {
+		CAM_WARN(CAM_CRM, "No active links in session %x",
+		flush_info->session_hdl);
+		goto end;
+	}
+
+	link = (struct cam_req_mgr_core_link *)
+		cam_get_device_priv(flush_info->link_hdl);
+	if (!link) {
+		CAM_DBG(CAM_CRM, "link ptr NULL %x", flush_info->link_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	task = cam_req_mgr_workq_get_task(link->workq);
+	if (!task) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	task_data = (struct crm_task_payload *)task->payload;
+	task_data->type = CRM_WORKQ_TASK_FLUSH_REQ;
+	flush = (struct cam_req_mgr_flush_info *)&task_data->u;
+	flush->req_id = flush_info->req_id;
+	flush->link_hdl = flush_info->link_hdl;
+	flush->flush_type = flush_info->flush_type;
+	task->process_cb = &cam_req_mgr_process_flush_req;
+	init_completion(&link->workq_comp);
+	rc = cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+
+	/*
+	 * Blocking call: wait_for_completion_timeout() returns the
+	 * remaining jiffies on completion and 0 on timeout.
+	 */
+	rc = wait_for_completion_timeout(
+		&link->workq_comp,
+		msecs_to_jiffies(CAM_REQ_MGR_SCHED_REQ_TIMEOUT));
+end:
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+	return rc;
+}
+
+int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control)
+{
+	int                               rc = 0;
+	int                               i, j;
+	struct cam_req_mgr_core_link     *link = NULL;
+
+	struct cam_req_mgr_connected_device *dev = NULL;
+	struct cam_req_mgr_link_evt_data     evt_data;
+
+	if (!control) {
+		CAM_ERR(CAM_CRM, "Control command is NULL");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (control->num_links > MAX_LINKS_PER_SESSION) {
+		CAM_ERR(CAM_CRM, "Invalid number of links %d",
+			control->num_links);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&g_crm_core_dev->crm_lock);
+	for (i = 0; i < control->num_links; i++) {
+		link = (struct cam_req_mgr_core_link *)
+			cam_get_device_priv(control->link_hdls[i]);
+		if (!link) {
+			CAM_ERR(CAM_CRM, "Link(%d) is NULL on session 0x%x",
+				i, control->session_hdl);
+			rc = -EINVAL;
+			break;
+		}
+
+		mutex_lock(&link->lock);
+		if (control->ops == CAM_REQ_MGR_LINK_ACTIVATE) {
+			/* Start SOF watchdog timer */
+			rc = crm_timer_init(&link->watchdog,
+				CAM_REQ_MGR_WATCHDOG_TIMEOUT, link,
+				&__cam_req_mgr_sof_freeze);
+			if (rc < 0) {
+				CAM_ERR(CAM_CRM,
+					"SOF timer start fails: link=0x%x",
+					link->link_hdl);
+				rc = -EFAULT;
+			}
+			/* notify nodes */
+			for (j = 0; j < link->num_devs; j++) {
+				dev = &link->l_dev[j];
+				evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_RESUME;
+				evt_data.link_hdl =  link->link_hdl;
+				evt_data.dev_hdl = dev->dev_hdl;
+				evt_data.req_id = 0;
+				if (dev->ops && dev->ops->process_evt)
+					dev->ops->process_evt(&evt_data);
+			}
+		} else if (control->ops == CAM_REQ_MGR_LINK_DEACTIVATE) {
+			/* Destroy SOF watchdog timer */
+			spin_lock_bh(&link->link_state_spin_lock);
+			crm_timer_exit(&link->watchdog);
+			spin_unlock_bh(&link->link_state_spin_lock);
+			/* notify nodes */
+			for (j = 0; j < link->num_devs; j++) {
+				dev = &link->l_dev[j];
+				evt_data.evt_type = CAM_REQ_MGR_LINK_EVT_PAUSE;
+				evt_data.link_hdl =  link->link_hdl;
+				evt_data.dev_hdl = dev->dev_hdl;
+				evt_data.req_id = 0;
+				if (dev->ops && dev->ops->process_evt)
+					dev->ops->process_evt(&evt_data);
+			}
+		} else {
+			CAM_ERR(CAM_CRM, "Invalid link control command");
+			rc = -EINVAL;
+		}
+		mutex_unlock(&link->lock);
+	}
+	mutex_unlock(&g_crm_core_dev->crm_lock);
+end:
+	return rc;
+}
+
+int cam_req_mgr_core_device_init(void)
+{
+	CAM_DBG(CAM_CRM, "Enter g_crm_core_dev %pK", g_crm_core_dev);
+
+	if (g_crm_core_dev) {
+		CAM_WARN(CAM_CRM, "core device is already initialized");
+		return 0;
+	}
+	g_crm_core_dev = kzalloc(sizeof(*g_crm_core_dev),
+		GFP_KERNEL);
+	if (!g_crm_core_dev)
+		return -ENOMEM;
+
+	CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
+	INIT_LIST_HEAD(&g_crm_core_dev->session_head);
+	mutex_init(&g_crm_core_dev->crm_lock);
+	cam_req_mgr_debug_register(g_crm_core_dev);
+
+	return 0;
+}
+
+int cam_req_mgr_core_device_deinit(void)
+{
+	if (!g_crm_core_dev) {
+		CAM_ERR(CAM_CRM, "NULL pointer");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CRM, "g_crm_core_dev %pK", g_crm_core_dev);
+	mutex_destroy(&g_crm_core_dev->crm_lock);
+	kfree(g_crm_core_dev);
+	g_crm_core_dev = NULL;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
new file mode 100644
index 0000000..85b13e5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -0,0 +1,465 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_REQ_MGR_CORE_H_
+#define _CAM_REQ_MGR_CORE_H_
+
+#include <linux/spinlock.h>
+#include "cam_req_mgr_interface.h"
+#include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_timer.h"
+
+#define CAM_REQ_MGR_MAX_LINKED_DEV     16
+#define MAX_REQ_SLOTS                  48
+
+#define CAM_REQ_MGR_WATCHDOG_TIMEOUT   5000
+#define CAM_REQ_MGR_SCHED_REQ_TIMEOUT  1000
+#define CAM_REQ_MGR_SIMULATE_SCHED_REQ 30
+
+#define FORCE_DISABLE_RECOVERY  2
+#define FORCE_ENABLE_RECOVERY   1
+#define AUTO_RECOVERY           0
+
+#define CRM_WORKQ_NUM_TASKS 60
+
+#define MAX_SYNC_COUNT 65535
+
+#define SYNC_LINK_SOF_CNT_MAX_LMT 1
+
+#define MAXIMUM_LINKS_PER_SESSION  4
+
+/**
+ * enum crm_workq_task_type
+ * @codes: to identify which type of task is present
+ */
+enum crm_workq_task_type {
+	CRM_WORKQ_TASK_GET_DEV_INFO,
+	CRM_WORKQ_TASK_SETUP_LINK,
+	CRM_WORKQ_TASK_DEV_ADD_REQ,
+	CRM_WORKQ_TASK_APPLY_REQ,
+	CRM_WORKQ_TASK_NOTIFY_SOF,
+	CRM_WORKQ_TASK_NOTIFY_ERR,
+	CRM_WORKQ_TASK_NOTIFY_FREEZE,
+	CRM_WORKQ_TASK_SCHED_REQ,
+	CRM_WORKQ_TASK_FLUSH_REQ,
+	CRM_WORKQ_TASK_INVALID,
+};
+
+/**
+ * struct crm_task_payload
+ * @type           : to identify which type of task is present
+ * @u              : union of payload of all types of tasks supported
+ * @sched_req      : contains info of incoming request from CSL to CRM
+ * @flush_info     : contains info of a cancelled request
+ * @dev_req        : contains tracking info of available req id at device
+ * @send_req       : contains info of apply settings to be sent to devs in link
+ * @notify_trigger : contains notification from IFE to CRM about trigger
+ * @notify_err     : contains info of an error that occurred while processing
+ *                   a request
+ */
+struct crm_task_payload {
+	enum crm_workq_task_type type;
+	union {
+		struct cam_req_mgr_sched_request        sched_req;
+		struct cam_req_mgr_flush_info           flush_info;
+		struct cam_req_mgr_add_request          dev_req;
+		struct cam_req_mgr_send_request         send_req;
+		struct cam_req_mgr_trigger_notify       notify_trigger;
+		struct cam_req_mgr_error_notify         notify_err;
+	} u;
+};
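+
+/*
+ * Illustrative producer pattern (a sketch of what cam_req_mgr_core.c does,
+ * e.g. in cam_req_mgr_cb_add_req()): fetch a workq task, fill the union
+ * member that matches @type, then enqueue:
+ *
+ *	struct crm_workq_task *task;
+ *	struct crm_task_payload *task_data;
+ *
+ *	task = cam_req_mgr_workq_get_task(link->workq);
+ *	task_data = (struct crm_task_payload *)task->payload;
+ *	task_data->type = CRM_WORKQ_TASK_DEV_ADD_REQ;
+ *	task_data->u.dev_req.req_id = req_id;
+ *	task->process_cb = &cam_req_mgr_process_add_req;
+ *	cam_req_mgr_workq_enqueue_task(task, link, CRM_TASK_PRIORITY_0);
+ */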
+
+/**
+ * enum crm_req_state
+ * State machine for life cycle of request in pd table
+ * EMPTY   : indicates req slot is empty
+ * PENDING : indicates req slot is waiting for reqs from all devs
+ * READY   : indicates req slot is ready to be sent to devs
+ * INVALID : indicates req slot is not in valid state
+ */
+enum crm_req_state {
+	CRM_REQ_STATE_EMPTY,
+	CRM_REQ_STATE_PENDING,
+	CRM_REQ_STATE_READY,
+	CRM_REQ_STATE_INVALID,
+};
+
+/**
+ * enum crm_slot_status
+ * State machine for life cycle of request in input queue
+ * NO_REQ     : empty slot
+ * REQ_ADDED  : new entry in slot
+ * PENDING    : waiting for next trigger to apply
+ * APPLIED    : req is sent to all devices
+ * INVALID    : invalid state
+ */
+enum crm_slot_status {
+	CRM_SLOT_STATUS_NO_REQ,
+	CRM_SLOT_STATUS_REQ_ADDED,
+	CRM_SLOT_STATUS_REQ_PENDING,
+	CRM_SLOT_STATUS_REQ_APPLIED,
+	CRM_SLOT_STATUS_INVALID,
+};
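+
+/*
+ * Typical slot life cycle, as suggested by the status documentation above
+ * and by cam_req_mgr_core.c:
+ *
+ *	NO_REQ -> REQ_ADDED    (cam_req_mgr_process_sched_req)
+ *	REQ_ADDED -> REQ_PENDING -> REQ_APPLIED    (trigger processing)
+ *	REQ_APPLIED -> NO_REQ    (slot reset once serviced)
+ */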
+
+/**
+ * enum cam_req_mgr_link_state
+ * State machine for life cycle of link in crm
+ * AVAILABLE  : link available
+ * IDLE       : link initialized but not ready yet
+ * READY      : link is ready for use
+ * ERR        : link has encountered error
+ * MAX        : invalid state
+ */
+enum cam_req_mgr_link_state {
+	CAM_CRM_LINK_STATE_AVAILABLE,
+	CAM_CRM_LINK_STATE_IDLE,
+	CAM_CRM_LINK_STATE_READY,
+	CAM_CRM_LINK_STATE_ERR,
+	CAM_CRM_LINK_STATE_MAX,
+};
+
+/**
+ * struct cam_req_mgr_traverse
+ * @idx              : slot index
+ * @result           : contains which all tables were able to apply successfully
+ * @tbl              : pointer of pipeline delay based request table
+ * @apply_data       : pointer which various tables will update during traverse
+ * @in_q             : input request queue pointer
+ * @validate_only    : whether to only validate, or to also update apply
+ *                     settings
+ * @self_link        : To indicate whether the check is for the given link or
+ *                     the other sync link
+ * @inject_delay_chk : if inject delay has been validated for all pd devices
+ * @open_req_cnt     : Count of open requests yet to be serviced in the kernel.
+ */
+struct cam_req_mgr_traverse {
+	int32_t                       idx;
+	uint32_t                      result;
+	struct cam_req_mgr_req_tbl   *tbl;
+	struct cam_req_mgr_apply     *apply_data;
+	struct cam_req_mgr_req_queue *in_q;
+	bool                          validate_only;
+	bool                          self_link;
+	bool                          inject_delay_chk;
+	int32_t                       open_req_cnt;
+};
+
+/**
+ * struct cam_req_mgr_apply
+ * @idx      : corresponding input queue slot index
+ * @pd       : pipeline delay of device
+ * @req_id   : req id for dev with above pd to process
+ * @skip_idx : skip applying settings when this is set.
+ */
+struct cam_req_mgr_apply {
+	int32_t idx;
+	int32_t pd;
+	int64_t req_id;
+	int32_t skip_idx;
+};
+
+/**
+ * struct cam_req_mgr_tbl_slot
+ * @idx             : slot index
+ * @req_ready_map   : mask tracking which devices have their request ready
+ * @state           : state machine for life cycle of a slot
+ * @inject_delay    : number of frames of delay injected before applying,
+ *                    used for flash type use cases
+ * @dev_hdl         : stores the dev_hdl of the device with the higher
+ *                    inject delay
+ * @skip_next_frame : flag to drop the frame after the skip_before_applying
+ *                    frame
+ * @is_applied      : flag to identify if request is already applied to
+ *                    device.
+ */
+struct cam_req_mgr_tbl_slot {
+	int32_t             idx;
+	uint32_t            req_ready_map;
+	enum crm_req_state  state;
+	uint32_t            inject_delay;
+	int32_t             dev_hdl;
+	bool                skip_next_frame;
+	bool                is_applied;
+};
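+
+/*
+ * Readiness of a tbl slot is tracked per device: cam_req_mgr_process_add_req()
+ * sets req_ready_map |= (1 << dev->dev_bit) as each device reports its
+ * request, and the slot moves to CRM_REQ_STATE_READY once
+ * req_ready_map == tbl->dev_mask.
+ */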
+
+/**
+ * struct cam_req_mgr_req_tbl
+ * @id            : table identifier
+ * @pd            : pipeline delay of table
+ * @dev_count     : num of devices having same pipeline delay
+ * @dev_mask      : mask to track which devices are linked
+ * @skip_traverse : to indicate how many traverses need to be dropped by
+ *                  this table, especially at the beginning or during
+ *                  bubble recovery
+ * @next          : pointer to next pipeline delay request table
+ * @pd_delta      : difference between this table's pipeline delay and next
+ * @num_slots     : number of request slots present in the table
+ * @slot          : array of slots tracking requests availability at devices
+ */
+struct cam_req_mgr_req_tbl {
+	int32_t                     id;
+	int32_t                     pd;
+	int32_t                     dev_count;
+	int32_t                     dev_mask;
+	int32_t                     skip_traverse;
+	struct cam_req_mgr_req_tbl *next;
+	int32_t                     pd_delta;
+	int32_t                     num_slots;
+	struct cam_req_mgr_tbl_slot slot[MAX_REQ_SLOTS];
+};
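+
+/*
+ * The per-pd tables form a singly linked list headed at
+ * cam_req_mgr_req_data.l_tbl; cam_req_mgr_core.c keeps it sorted from the
+ * highest pipeline delay to the lowest (see __cam_req_mgr_add_tbl_to_link()).
+ */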
+
+/**
+ * struct cam_req_mgr_slot
+ * - Internal bookkeeping
+ * @idx          : slot index
+ * @skip_idx     : if req id in this slot needs to be skipped/not applied
+ * @status       : state machine for life cycle of a slot
+ * - members updated due to external events
+ * @recover      : if user enabled recovery for this request.
+ * @req_id       : request id of the request tracked by this slot
+ * @sync_mode    : Sync mode in which req id in this slot has to be applied
+ */
+struct cam_req_mgr_slot {
+	int32_t               idx;
+	int32_t               skip_idx;
+	enum crm_slot_status  status;
+	int32_t               recover;
+	int64_t               req_id;
+	int32_t               sync_mode;
+};
+
+/**
+ * struct cam_req_mgr_req_queue
+ * @num_slots   : max num of input queue slots
+ * @slot        : request slot holding incoming request id and bubble info.
+ * @rd_idx      : indicates slot index currently in process.
+ * @wr_idx      : indicates slot index to hold new upcoming req.
+ */
+struct cam_req_mgr_req_queue {
+	int32_t                     num_slots;
+	struct cam_req_mgr_slot     slot[MAX_REQ_SLOTS];
+	int32_t                     rd_idx;
+	int32_t                     wr_idx;
+};
+
+/**
+ * struct cam_req_mgr_req_data
+ * @in_q        : Pointer to the input request queue
+ * @l_tbl       : unique pd request tables.
+ * @num_tbl     : how many unique pd value devices are present
+ * @apply_data  : Holds information about request id for a request
+ * @lock        : mutex lock protecting request data ops.
+ */
+struct cam_req_mgr_req_data {
+	struct cam_req_mgr_req_queue *in_q;
+	struct cam_req_mgr_req_tbl   *l_tbl;
+	int32_t                       num_tbl;
+	struct cam_req_mgr_apply      apply_data[CAM_PIPELINE_DELAY_MAX];
+	struct mutex                  lock;
+};
+
+/**
+ * struct cam_req_mgr_connected_device
+ * - Device Properties
+ * @dev_hdl  : device handle
+ * @dev_bit  : unique bit assigned to device in link
+ * - Device characteristics
+ * @pd_tbl   : tracks latest available req id at this device
+ * @dev_info : holds dev characteristics such as pipeline delay, dev name
+ * @ops      : holds func pointer to call methods on this device
+ * @parent   : pvt data - like link which this dev hdl belongs to
+ */
+struct cam_req_mgr_connected_device {
+	int32_t                         dev_hdl;
+	int64_t                         dev_bit;
+	struct cam_req_mgr_req_tbl     *pd_tbl;
+	struct cam_req_mgr_device_info  dev_info;
+	struct cam_req_mgr_kmd_ops     *ops;
+	void                           *parent;
+};
+
+/**
+ * struct cam_req_mgr_core_link
+ * -  Link Properties
+ * @link_hdl             : Link identifier
+ * @num_devs             : num of connected devices to this link
+ * @max_delay            : Max of pipeline delay of all connected devs
+ * @workq                : Pointer to handle workq related jobs
+ * @pd_mask              : each set bit indicates the device with pd equal to
+ *                          bit position is available.
+ * - List of connected devices
+ * @l_dev                : List of connected devices to this link
+ * - Request handling data struct
+ * @req                  : req data holder.
+ * - Timer
+ * @watchdog             : watchdog timer to recover from sof freeze
+ * - Link private data
+ * @workq_comp           : conditional variable to block user thread for workq
+ *                          to finish schedule request processing
+ * @state                : link state machine
+ * @parent               : pvt data - link's parent is session
+ * @lock                 : mutex lock to guard link data operations
+ * @link_state_spin_lock : spin lock to protect link state variable
+ * @subscribe_event      : irqs the link subscribes to; IFE should send
+ *                         notification to CRM at those hw events.
+ * @trigger_mask         : mask on which irq the req is already applied
+ * @sync_link            : pointer to the sync link for synchronization
+ * @sof_counter          : sof counter during sync_mode
+ * @sync_self_ref        : reference sync count against which the difference
+ *                         between sync_counts for a given link is checked
+ * @frame_skip_flag      : flag that determines if a frame needs to be skipped
+ * @sync_link_sof_skip   : flag indicating that when a pkt is not available
+ *                         for a given frame on a link, the corresponding
+ *                         frame on the sync link is skipped as well.
+ * @open_req_cnt         : Counter to keep track of open requests that are yet
+ *                         to be serviced in the kernel.
+ * @last_flush_id        : Last request to flush
+ *
+ */
+struct cam_req_mgr_core_link {
+	int32_t                              link_hdl;
+	int32_t                              num_devs;
+	enum cam_pipeline_delay              max_delay;
+	struct cam_req_mgr_core_workq       *workq;
+	int32_t                              pd_mask;
+	struct cam_req_mgr_connected_device *l_dev;
+	struct cam_req_mgr_req_data          req;
+	struct cam_req_mgr_timer            *watchdog;
+	struct completion                    workq_comp;
+	enum cam_req_mgr_link_state          state;
+	void                                *parent;
+	struct mutex                         lock;
+	spinlock_t                           link_state_spin_lock;
+	uint32_t                             subscribe_event;
+	uint32_t                             trigger_mask;
+	struct cam_req_mgr_core_link        *sync_link;
+	int64_t                              sof_counter;
+	int64_t                              sync_self_ref;
+	bool                                 frame_skip_flag;
+	bool                                 sync_link_sof_skip;
+	int32_t                              open_req_cnt;
+	uint32_t                             last_flush_id;
+};
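+
+/*
+ * Lock ordering as read from cam_req_mgr_core.c (an observation, not a
+ * documented contract): g_crm_core_dev->crm_lock is taken outermost;
+ * link->lock and link->req.lock are each taken under it on different paths;
+ * link_state_spin_lock is taken innermost, around link state and watchdog
+ * accesses.
+ */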
+
+/**
+ * struct cam_req_mgr_core_session
+ * - Session Properties
+ * @session_hdl        : session identifier
+ * @num_links          : num of active links for current session
+ * - Links of this session
+ * @links              : pointer to array of links within session
+ * @in_q               : Input request queue one per session
+ * - Session private data
+ * @entry              : pvt data - entry in the list of sessions
+ * @lock               : pvt data - mutex to guard session data
+ * - Debug data
+ * @force_err_recovery : For debugging, we can force bubble recovery
+ *                       to be always ON or always OFF using debugfs.
+ * @sync_mode          : Sync mode for this session links
+ */
+struct cam_req_mgr_core_session {
+	int32_t                       session_hdl;
+	uint32_t                      num_links;
+	struct cam_req_mgr_core_link *links[MAXIMUM_LINKS_PER_SESSION];
+	struct list_head              entry;
+	struct mutex                  lock;
+	int32_t                       force_err_recovery;
+	int32_t                       sync_mode;
+};
+
+/**
+ * struct cam_req_mgr_core_device
+ * - Core camera request manager data struct
+ * @session_head : list head holding sessions
+ * @crm_lock     : mutex lock to protect session creation & destruction
+ */
+struct cam_req_mgr_core_device {
+	struct list_head             session_head;
+	struct mutex                 crm_lock;
+};
+
+/**
+ * cam_req_mgr_create_session()
+ * @brief    : creates session
+ * @ses_info : output param for session handle
+ *
+ * Called as part of session creation.
+ */
+int cam_req_mgr_create_session(struct cam_req_mgr_session_info *ses_info);
+
+/**
+ * cam_req_mgr_destroy_session()
+ * @brief    : destroy session
+ * @ses_info : session handle info, input param
+ *
+ * Called as part of session destroy.
+ * Returns success/failure.
+ */
+int cam_req_mgr_destroy_session(struct cam_req_mgr_session_info *ses_info);
+
+/**
+ * cam_req_mgr_link()
+ * @brief     : creates a link for a session
+ * @link_info : handle and session info to create a link
+ *
+ * A link is formed in a session for multiple devices. It creates
+ * a unique link handle for the link and is specific to a
+ * session. Returns the link handle.
+ */
+int cam_req_mgr_link(struct cam_req_mgr_link_info *link_info);
+
+/**
+ * cam_req_mgr_unlink()
+ * @brief       : destroy a link in a session
+ * @unlink_info : session and link handle info
+ *
+ * The link is destroyed within the session.
+ */
+int cam_req_mgr_unlink(struct cam_req_mgr_unlink_info *unlink_info);
+
+/**
+ * cam_req_mgr_schedule_request()
+ * @brief: Request is scheduled
+ * @sched_req: request id, session and link id info, bubble recovery info
+ */
+int cam_req_mgr_schedule_request(
+	struct cam_req_mgr_sched_request *sched_req);
+
+/**
+ * cam_req_mgr_sync_config()
+ * @brief: sync for links in a session
+ * @sync_info: session, links info and master link info
+ */
+int cam_req_mgr_sync_config(struct cam_req_mgr_sync_mode *sync_info);
+
+/**
+ * cam_req_mgr_flush_requests()
+ * @brief: flush all requests
+ * @flush_info: requests related to link and session
+ */
+int cam_req_mgr_flush_requests(
+	struct cam_req_mgr_flush_info *flush_info);
+
+/**
+ * cam_req_mgr_core_device_init()
+ * @brief: initialize crm core
+ */
+int cam_req_mgr_core_device_init(void);
+
+/**
+ * cam_req_mgr_core_device_deinit()
+ * @brief: clean up crm core
+ */
+int cam_req_mgr_core_device_deinit(void);
+
+/**
+ * cam_req_mgr_handle_core_shutdown()
+ * @brief: Handles camera close
+ */
+void cam_req_mgr_handle_core_shutdown(void);
+
+/**
+ * cam_req_mgr_link_control()
+ * @brief:   Handles link control operations
+ * @control: Link control command
+ */
+int cam_req_mgr_link_control(struct cam_req_mgr_link_control *control);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
new file mode 100644
index 0000000..409a3be
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core_defs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_REQ_MGR_CORE_DEFS_H_
+#define _CAM_REQ_MGR_CORE_DEFS_H_
+
+#define CRM_TRACE_ENABLE 0
+#define CRM_DEBUG_MUTEX 0
+
+#define SET_SUCCESS_BIT(ret, pd)  (ret |= (1 << (pd)))
+
+#define SET_FAILURE_BIT(ret, pd)  (ret &= (~(1 << (pd))))
+
+#define CRM_GET_REQ_ID(in_q, idx) in_q->slot[idx].req_id
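+
+/*
+ * Example (illustrative): building a traverse result bitmask where the bit
+ * position is the pipeline delay (pd) of the table that applied:
+ *
+ *	uint32_t ret = 0;
+ *	SET_SUCCESS_BIT(ret, 0);   -> ret == 0x1
+ *	SET_SUCCESS_BIT(ret, 1);   -> ret == 0x3
+ *	SET_FAILURE_BIT(ret, 1);   -> ret == 0x1
+ */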
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
new file mode 100644
index 0000000..d02cda9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_req_mgr_debug.h"
+
+#define MAX_SESS_INFO_LINE_BUFF_LEN 256
+
+static char sess_info_buffer[MAX_SESS_INFO_LINE_BUFF_LEN];
+
+static int cam_req_mgr_debug_set_bubble_recovery(void *data, u64 val)
+{
+	struct cam_req_mgr_core_device  *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+	int rc = 0;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			session->force_err_recovery = val;
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return rc;
+}
+
+static int cam_req_mgr_debug_get_bubble_recovery(void *data, u64 *val)
+{
+	struct cam_req_mgr_core_device *core_dev = data;
+	struct cam_req_mgr_core_session *session;
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		session = list_first_entry(&core_dev->session_head,
+			struct cam_req_mgr_core_session,
+			entry);
+		*val = session->force_err_recovery;
+	}
+	mutex_unlock(&core_dev->crm_lock);
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(bubble_recovery, cam_req_mgr_debug_get_bubble_recovery,
+	cam_req_mgr_debug_set_bubble_recovery, "%lld\n");
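+
+/*
+ * Once cam_req_mgr_debug_register() below creates the debugfs nodes, bubble
+ * recovery can be forced from user space; assuming debugfs is mounted at the
+ * usual /sys/kernel/debug:
+ *
+ *	echo 1 > /sys/kernel/debug/cam_req_mgr/bubble_recovery   (force enable)
+ *	echo 2 > /sys/kernel/debug/cam_req_mgr/bubble_recovery   (force disable)
+ *	echo 0 > /sys/kernel/debug/cam_req_mgr/bubble_recovery   (auto)
+ */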
+
+static int session_info_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t session_info_read(struct file *t_file, char *t_char,
+	size_t t_size_t, loff_t *t_loff_t)
+{
+	int i;
+	char *out_buffer = sess_info_buffer;
+	char line_buffer[MAX_SESS_INFO_LINE_BUFF_LEN] = {0};
+	struct cam_req_mgr_core_device *core_dev =
+		(struct cam_req_mgr_core_device *) t_file->private_data;
+	struct cam_req_mgr_core_session *session;
+
+	memset(out_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	mutex_lock(&core_dev->crm_lock);
+
+	if (!list_empty(&core_dev->session_head)) {
+		list_for_each_entry(session,
+			&core_dev->session_head, entry) {
+			snprintf(line_buffer, sizeof(line_buffer),
+				"session_hdl = %x \t"
+				"num_links = %d\n",
+				session->session_hdl, session->num_links);
+			strlcat(out_buffer, line_buffer,
+				sizeof(sess_info_buffer));
+			for (i = 0; i < session->num_links; i++) {
+				snprintf(line_buffer, sizeof(line_buffer),
+					"link_hdl[%d] = 0x%x, num_devs connected = %d\n",
+					i, session->links[i]->link_hdl,
+					session->links[i]->num_devs);
+				strlcat(out_buffer, line_buffer,
+					sizeof(sess_info_buffer));
+			}
+		}
+	}
+
+	mutex_unlock(&core_dev->crm_lock);
+
+	return simple_read_from_buffer(t_char, t_size_t,
+		t_loff_t, out_buffer, strlen(out_buffer));
+}
+
+static ssize_t session_info_write(struct file *t_file,
+	const char *t_char, size_t t_size_t, loff_t *t_loff_t)
+{
+	memset(sess_info_buffer, 0, MAX_SESS_INFO_LINE_BUFF_LEN);
+
+	return 0;
+}
+
+static const struct file_operations session_info = {
+	.open = session_info_open,
+	.read = session_info_read,
+	.write = session_info_write,
+};
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev)
+{
+	struct dentry *debugfs_root;
+	char dirname[32] = {0};
+
+	snprintf(dirname, sizeof(dirname), "cam_req_mgr");
+	debugfs_root = debugfs_create_dir(dirname, NULL);
+	if (!debugfs_root)
+		return -ENOMEM;
+
+	if (!debugfs_create_file("sessions_info", 0644,
+		debugfs_root, core_dev, &session_info))
+		return -ENOMEM;
+
+	if (!debugfs_create_file("bubble_recovery", 0644,
+		debugfs_root, core_dev, &bubble_recovery))
+		return -ENOMEM;
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
new file mode 100644
index 0000000..8f31e97
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_debug.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_REQ_MGR_DEBUG_H_
+#define _CAM_REQ_MGR_DEBUG_H_
+
+#include <linux/debugfs.h>
+#include "cam_req_mgr_core.h"
+
+int cam_req_mgr_debug_register(struct cam_req_mgr_core_device *core_dev);
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
new file mode 100644
index 0000000..df6d6ec
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_req_mgr.h>
+#include <media/cam_defs.h>
+#include "cam_req_mgr_dev.h"
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_core.h"
+#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+#include <linux/slub_def.h>
+
+#define CAM_REQ_MGR_EVENT_MAX 30
+
+static struct cam_req_mgr_device g_dev;
+struct kmem_cache *g_cam_req_mgr_timer_cachep;
+
+static int cam_media_device_setup(struct device *dev)
+{
+	int rc;
+
+	g_dev.v4l2_dev->mdev = kzalloc(sizeof(*g_dev.v4l2_dev->mdev),
+		GFP_KERNEL);
+	if (!g_dev.v4l2_dev->mdev) {
+		rc = -ENOMEM;
+		goto mdev_fail;
+	}
+
+	media_device_init(g_dev.v4l2_dev->mdev);
+	g_dev.v4l2_dev->mdev->dev = dev;
+	strlcpy(g_dev.v4l2_dev->mdev->model, CAM_REQ_MGR_VNODE_NAME,
+		sizeof(g_dev.v4l2_dev->mdev->model));
+
+	rc = media_device_register(g_dev.v4l2_dev->mdev);
+	if (rc)
+		goto media_fail;
+
+	return rc;
+
+media_fail:
+	kfree(g_dev.v4l2_dev->mdev);
+	g_dev.v4l2_dev->mdev = NULL;
+mdev_fail:
+	return rc;
+}
+
+static void cam_media_device_cleanup(void)
+{
+	media_entity_cleanup(&g_dev.video->entity);
+	media_device_unregister(g_dev.v4l2_dev->mdev);
+	kfree(g_dev.v4l2_dev->mdev);
+	g_dev.v4l2_dev->mdev = NULL;
+}
+
+static int cam_v4l2_device_setup(struct device *dev)
+{
+	int rc;
+
+	g_dev.v4l2_dev = kzalloc(sizeof(*g_dev.v4l2_dev),
+		GFP_KERNEL);
+	if (!g_dev.v4l2_dev)
+		return -ENOMEM;
+
+	rc = v4l2_device_register(dev, g_dev.v4l2_dev);
+	if (rc)
+		goto reg_fail;
+
+	return rc;
+
+reg_fail:
+	kfree(g_dev.v4l2_dev);
+	g_dev.v4l2_dev = NULL;
+	return rc;
+}
+
+static void cam_v4l2_device_cleanup(void)
+{
+	v4l2_device_unregister(g_dev.v4l2_dev);
+	kfree(g_dev.v4l2_dev);
+	g_dev.v4l2_dev = NULL;
+}
+
+static int cam_req_mgr_open(struct file *filep)
+{
+	int rc;
+
+	mutex_lock(&g_dev.cam_lock);
+	if (g_dev.open_cnt >= 1) {
+		rc = -EALREADY;
+		goto end;
+	}
+
+	rc = v4l2_fh_open(filep);
+	if (rc) {
+		CAM_ERR(CAM_CRM, "v4l2_fh_open failed: %d", rc);
+		goto end;
+	}
+
+	spin_lock_bh(&g_dev.cam_eventq_lock);
+	g_dev.cam_eventq = filep->private_data;
+	spin_unlock_bh(&g_dev.cam_eventq_lock);
+
+	g_dev.open_cnt++;
+	rc = cam_mem_mgr_init();
+	if (rc) {
+		g_dev.open_cnt--;
+		CAM_ERR(CAM_CRM, "mem mgr init failed");
+		goto mem_mgr_init_fail;
+	}
+
+	mutex_unlock(&g_dev.cam_lock);
+	return rc;
+
+mem_mgr_init_fail:
+	v4l2_fh_release(filep);
+end:
+	mutex_unlock(&g_dev.cam_lock);
+	return rc;
+}
+
+static unsigned int cam_req_mgr_poll(struct file *f,
+	struct poll_table_struct *pll_table)
+{
+	int rc = 0;
+	struct v4l2_fh *eventq = f->private_data;
+
+	if (!eventq)
+		return -EINVAL;
+
+	poll_wait(f, &eventq->wait, pll_table);
+	if (v4l2_event_pending(eventq))
+		rc = POLLPRI;
+
+	return rc;
+}
+
+static int cam_req_mgr_close(struct file *filep)
+{
+	struct v4l2_subdev *sd;
+	struct v4l2_fh *vfh = filep->private_data;
+	struct v4l2_subdev_fh *subdev_fh = to_v4l2_subdev_fh(vfh);
+
+	mutex_lock(&g_dev.cam_lock);
+
+	if (g_dev.open_cnt <= 0) {
+		mutex_unlock(&g_dev.cam_lock);
+		return -EINVAL;
+	}
+
+	cam_req_mgr_handle_core_shutdown();
+
+	list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) {
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+			continue;
+		if (sd->internal_ops && sd->internal_ops->close) {
+			CAM_DBG(CAM_CRM, "Invoke subdev close for device %s",
+				sd->name);
+			sd->internal_ops->close(sd, subdev_fh);
+		}
+	}
+
+	g_dev.open_cnt--;
+	v4l2_fh_release(filep);
+
+	spin_lock_bh(&g_dev.cam_eventq_lock);
+	g_dev.cam_eventq = NULL;
+	spin_unlock_bh(&g_dev.cam_eventq_lock);
+
+	cam_req_mgr_util_free_hdls();
+	cam_mem_mgr_deinit();
+	mutex_unlock(&g_dev.cam_lock);
+
+	return 0;
+}
+
+static struct v4l2_file_operations g_cam_fops = {
+	.owner  = THIS_MODULE,
+	.open   = cam_req_mgr_open,
+	.poll   = cam_req_mgr_poll,
+	.release = cam_req_mgr_close,
+	.unlocked_ioctl   = video_ioctl2,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = video_ioctl2,
+#endif
+};
+
+static int cam_subscribe_event(struct v4l2_fh *fh,
+	const struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_REQ_MGR_EVENT_MAX, NULL);
+}
+
+static int cam_unsubscribe_event(struct v4l2_fh *fh,
+	const struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static long cam_private_ioctl(struct file *file, void *fh,
+	bool valid_prio, unsigned int cmd, void *arg)
+{
+	int rc;
+	struct cam_control *k_ioctl;
+
+	if ((!arg) || (cmd != VIDIOC_CAM_CONTROL))
+		return -EINVAL;
+
+	k_ioctl = (struct cam_control *)arg;
+
+	if (!k_ioctl->handle)
+		return -EINVAL;
+
+	switch (k_ioctl->op_code) {
+	case CAM_REQ_MGR_CREATE_SESSION: {
+		struct cam_req_mgr_session_info ses_info;
+
+		if (k_ioctl->size != sizeof(ses_info))
+			return -EINVAL;
+
+		if (copy_from_user(&ses_info,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_session_info))) {
+			return -EFAULT;
+		}
+
+		rc = cam_req_mgr_create_session(&ses_info);
+		if (!rc)
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
+				&ses_info,
+				sizeof(struct cam_req_mgr_session_info)))
+				rc = -EFAULT;
+		}
+		break;
+
+	case CAM_REQ_MGR_DESTROY_SESSION: {
+		struct cam_req_mgr_session_info ses_info;
+
+		if (k_ioctl->size != sizeof(ses_info))
+			return -EINVAL;
+
+		if (copy_from_user(&ses_info,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_session_info))) {
+			return -EFAULT;
+		}
+
+		rc = cam_req_mgr_destroy_session(&ses_info);
+		}
+		break;
+
+	case CAM_REQ_MGR_LINK: {
+		struct cam_req_mgr_link_info link_info;
+
+		if (k_ioctl->size != sizeof(link_info))
+			return -EINVAL;
+
+		if (copy_from_user(&link_info,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_link_info))) {
+			return -EFAULT;
+		}
+
+		rc = cam_req_mgr_link(&link_info);
+		if (!rc)
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
+				&link_info,
+				sizeof(struct cam_req_mgr_link_info)))
+				rc = -EFAULT;
+		}
+		break;
+
+	case CAM_REQ_MGR_UNLINK: {
+		struct cam_req_mgr_unlink_info unlink_info;
+
+		if (k_ioctl->size != sizeof(unlink_info))
+			return -EINVAL;
+
+		if (copy_from_user(&unlink_info,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_unlink_info))) {
+			return -EFAULT;
+		}
+
+		rc = cam_req_mgr_unlink(&unlink_info);
+		}
+		break;
+
+	case CAM_REQ_MGR_SCHED_REQ: {
+		struct cam_req_mgr_sched_request sched_req;
+
+		if (k_ioctl->size != sizeof(sched_req))
+			return -EINVAL;
+
+		if (copy_from_user(&sched_req,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_sched_request))) {
+			return -EFAULT;
+		}
+
+		rc = cam_req_mgr_schedule_request(&sched_req);
+		}
+		break;
+
+	case CAM_REQ_MGR_FLUSH_REQ: {
+		struct cam_req_mgr_flush_info flush_info;
+
+		if (k_ioctl->size != sizeof(flush_info))
+			return -EINVAL;
+
+		if (copy_from_user(&flush_info,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_flush_info))) {
+			return -EFAULT;
+		}
+
+		rc = cam_req_mgr_flush_requests(&flush_info);
+		}
+		break;
+
+	case CAM_REQ_MGR_SYNC_MODE: {
+		struct cam_req_mgr_sync_mode sync_info;
+
+		if (k_ioctl->size != sizeof(sync_info))
+			return -EINVAL;
+
+		if (copy_from_user(&sync_info,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_sync_mode))) {
+			return -EFAULT;
+		}
+
+		rc = cam_req_mgr_sync_config(&sync_info);
+		}
+		break;
+	case CAM_REQ_MGR_ALLOC_BUF: {
+		struct cam_mem_mgr_alloc_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_mem_mgr_alloc_cmd))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_alloc_and_map(&cmd);
+		if (!rc)
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
+				&cmd, sizeof(struct cam_mem_mgr_alloc_cmd))) {
+				rc = -EFAULT;
+				break;
+			}
+		}
+		break;
+	case CAM_REQ_MGR_MAP_BUF: {
+		struct cam_mem_mgr_map_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_mem_mgr_map_cmd))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_map(&cmd);
+		if (!rc)
+			if (copy_to_user(
+				u64_to_user_ptr(k_ioctl->handle),
+				&cmd, sizeof(struct cam_mem_mgr_map_cmd))) {
+				rc = -EFAULT;
+				break;
+			}
+		}
+		break;
+	case CAM_REQ_MGR_RELEASE_BUF: {
+		struct cam_mem_mgr_release_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_mem_mgr_release_cmd))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_release(&cmd);
+		}
+		break;
+	case CAM_REQ_MGR_CACHE_OPS: {
+		struct cam_mem_cache_ops_cmd cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_mem_cache_ops_cmd))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_mem_mgr_cache_ops(&cmd);
+		if (rc)
+			rc = -EINVAL;
+		}
+		break;
+	case CAM_REQ_MGR_LINK_CONTROL: {
+		struct cam_req_mgr_link_control cmd;
+
+		if (k_ioctl->size != sizeof(cmd))
+			return -EINVAL;
+
+		if (copy_from_user(&cmd,
+			u64_to_user_ptr(k_ioctl->handle),
+			sizeof(struct cam_req_mgr_link_control))) {
+			rc = -EFAULT;
+			break;
+		}
+
+		rc = cam_req_mgr_link_control(&cmd);
+		if (rc)
+			rc = -EINVAL;
+		}
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+static const struct v4l2_ioctl_ops g_cam_ioctl_ops = {
+	.vidioc_subscribe_event = cam_subscribe_event,
+	.vidioc_unsubscribe_event = cam_unsubscribe_event,
+	.vidioc_default = cam_private_ioctl,
+};
+
+static int cam_video_device_setup(void)
+{
+	int rc;
+
+	g_dev.video = video_device_alloc();
+	if (!g_dev.video) {
+		rc = -ENOMEM;
+		goto video_fail;
+	}
+
+	g_dev.video->v4l2_dev = g_dev.v4l2_dev;
+
+	strlcpy(g_dev.video->name, "cam-req-mgr",
+		sizeof(g_dev.video->name));
+	g_dev.video->release = video_device_release;
+	g_dev.video->fops = &g_cam_fops;
+	g_dev.video->ioctl_ops = &g_cam_ioctl_ops;
+	g_dev.video->minor = -1;
+	g_dev.video->vfl_type = VFL_TYPE_GRABBER;
+	rc = video_register_device(g_dev.video, VFL_TYPE_GRABBER, -1);
+	if (rc)
+		goto v4l2_fail;
+
+	rc = media_entity_pads_init(&g_dev.video->entity, 0, NULL);
+	if (rc)
+		goto entity_fail;
+
+	g_dev.video->entity.function = CAM_VNODE_DEVICE_TYPE;
+	g_dev.video->entity.name = video_device_node_name(g_dev.video);
+
+	return rc;
+
+entity_fail:
+	video_unregister_device(g_dev.video);
+v4l2_fail:
+	video_device_release(g_dev.video);
+	g_dev.video = NULL;
+video_fail:
+	return rc;
+}
+
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
+	uint32_t id,
+	uint32_t type)
+{
+	struct v4l2_event event;
+	struct cam_req_mgr_message *ev_header;
+
+	if (!msg)
+		return -EINVAL;
+
+	event.id = id;
+	event.type = type;
+	ev_header = CAM_REQ_MGR_GET_PAYLOAD_PTR(event,
+		struct cam_req_mgr_message);
+	memcpy(ev_header, msg, sizeof(struct cam_req_mgr_message));
+	v4l2_event_queue(g_dev.video, &event);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_req_mgr_notify_message);
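+
+/*
+ * Illustrative caller sketch; the event ids are assumed from the cam_req_mgr
+ * UAPI header and the field values are examples only. A client fills a
+ * cam_req_mgr_message and queues it as a V4L2 event so userspace pollers on
+ * the cam-req-mgr node can pick it up:
+ *
+ *	struct cam_req_mgr_message msg = {};
+ *
+ *	msg.session_hdl = session_hdl;
+ *	cam_req_mgr_notify_message(&msg, V4L_EVENT_CAM_REQ_MGR_ERROR,
+ *		V4L_EVENT_CAM_REQ_MGR_EVENT);
+ */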
+
+void cam_video_device_cleanup(void)
+{
+	video_unregister_device(g_dev.video);
+	video_device_release(g_dev.video);
+	g_dev.video = NULL;
+}
+
+void cam_register_subdev_fops(struct v4l2_file_operations *fops)
+{
+	*fops = v4l2_subdev_fops;
+}
+EXPORT_SYMBOL(cam_register_subdev_fops);
+
+int cam_register_subdev(struct cam_subdev *csd)
+{
+	struct v4l2_subdev *sd;
+	int rc;
+
+	if (!g_dev.state) {
+		CAM_ERR(CAM_CRM, "camera root device not ready yet");
+		return -ENODEV;
+	}
+
+	if (!csd || !csd->name) {
+		CAM_ERR(CAM_CRM, "invalid arguments");
+		return -EINVAL;
+	}
+
+	mutex_lock(&g_dev.dev_lock);
+	if ((g_dev.subdev_nodes_created) &&
+		(csd->sd_flags & V4L2_SUBDEV_FL_HAS_DEVNODE)) {
+		CAM_ERR(CAM_CRM,
+			"dynamic node is not allowed, name: %s, type :%d",
+			csd->name, csd->ent_function);
+		rc = -EINVAL;
+		goto reg_fail;
+	}
+
+	sd = &csd->sd;
+	v4l2_subdev_init(sd, csd->ops);
+	sd->internal_ops = csd->internal_ops;
+	snprintf(sd->name, ARRAY_SIZE(sd->name), "%s", csd->name);
+	v4l2_set_subdevdata(sd, csd->token);
+
+	sd->flags = csd->sd_flags;
+	sd->entity.num_pads = 0;
+	sd->entity.pads = NULL;
+	sd->entity.function = csd->ent_function;
+
+	rc = v4l2_device_register_subdev(g_dev.v4l2_dev, sd);
+	if (rc) {
+		CAM_ERR(CAM_CRM, "register subdev failed");
+		goto reg_fail;
+	}
+	g_dev.count++;
+
+reg_fail:
+	mutex_unlock(&g_dev.dev_lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_register_subdev);
+
+int cam_unregister_subdev(struct cam_subdev *csd)
+{
+	if (!g_dev.state) {
+		CAM_ERR(CAM_CRM, "camera root device not ready yet");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_dev.dev_lock);
+	v4l2_device_unregister_subdev(&csd->sd);
+	g_dev.count--;
+	mutex_unlock(&g_dev.dev_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_unregister_subdev);
+
+static int cam_req_mgr_remove(struct platform_device *pdev)
+{
+	cam_req_mgr_core_device_deinit();
+	cam_req_mgr_util_deinit();
+	cam_media_device_cleanup();
+	cam_video_device_cleanup();
+	cam_v4l2_device_cleanup();
+	mutex_destroy(&g_dev.dev_lock);
+	g_dev.state = false;
+	g_dev.subdev_nodes_created = false;
+
+	return 0;
+}
+
+static int cam_req_mgr_probe(struct platform_device *pdev)
+{
+	int rc;
+
+	rc = cam_v4l2_device_setup(&pdev->dev);
+	if (rc)
+		return rc;
+
+	rc = cam_media_device_setup(&pdev->dev);
+	if (rc)
+		goto media_setup_fail;
+
+	rc = cam_video_device_setup();
+	if (rc)
+		goto video_setup_fail;
+
+	g_dev.open_cnt = 0;
+	mutex_init(&g_dev.cam_lock);
+	spin_lock_init(&g_dev.cam_eventq_lock);
+	g_dev.subdev_nodes_created = false;
+	mutex_init(&g_dev.dev_lock);
+
+	rc = cam_req_mgr_util_init();
+	if (rc) {
+		CAM_ERR(CAM_CRM, "cam req mgr util init is failed");
+		goto req_mgr_util_fail;
+	}
+
+	rc = cam_req_mgr_core_device_init();
+	if (rc) {
+		CAM_ERR(CAM_CRM, "core device setup failed");
+		goto req_mgr_core_fail;
+	}
+
+	g_dev.state = true;
+
+	if (g_cam_req_mgr_timer_cachep == NULL) {
+		g_cam_req_mgr_timer_cachep = kmem_cache_create("crm_timer",
+			sizeof(struct cam_req_mgr_timer), 64,
+			SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE |
+			SLAB_POISON | SLAB_STORE_USER, NULL);
+		if (!g_cam_req_mgr_timer_cachep)
+			CAM_ERR(CAM_CRM,
+				"Failed to create kmem_cache for crm_timer");
+		else
+			CAM_DBG(CAM_CRM, "Name : %s",
+				g_cam_req_mgr_timer_cachep->name);
+	}
+
+	return rc;
+
+req_mgr_core_fail:
+	cam_req_mgr_util_deinit();
+req_mgr_util_fail:
+	mutex_destroy(&g_dev.dev_lock);
+	mutex_destroy(&g_dev.cam_lock);
+	cam_video_device_cleanup();
+video_setup_fail:
+	cam_media_device_cleanup();
+media_setup_fail:
+	cam_v4l2_device_cleanup();
+	return rc;
+}
+
+static const struct of_device_id cam_req_mgr_dt_match[] = {
+	{.compatible = "qcom,cam-req-mgr"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_req_mgr_dt_match);
+
+static struct platform_driver cam_req_mgr_driver = {
+	.probe = cam_req_mgr_probe,
+	.remove = cam_req_mgr_remove,
+	.driver = {
+		.name = "cam_req_mgr",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_req_mgr_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+int cam_dev_mgr_create_subdev_nodes(void)
+{
+	int rc;
+	struct v4l2_subdev *sd;
+
+	if (!g_dev.v4l2_dev)
+		return -EINVAL;
+
+	if (!g_dev.state) {
+		CAM_ERR(CAM_CRM, "camera root device not ready yet");
+		return -ENODEV;
+	}
+
+	mutex_lock(&g_dev.dev_lock);
+	if (g_dev.subdev_nodes_created)	{
+		rc = -EEXIST;
+		goto create_fail;
+	}
+
+	rc = v4l2_device_register_subdev_nodes(g_dev.v4l2_dev);
+	if (rc) {
+		CAM_ERR(CAM_CRM, "failed to register the sub devices");
+		goto create_fail;
+	}
+
+	list_for_each_entry(sd, &g_dev.v4l2_dev->subdevs, list) {
+		if (!(sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE))
+			continue;
+		sd->entity.name = video_device_node_name(sd->devnode);
+		CAM_DBG(CAM_CRM, "created node :%s", sd->entity.name);
+	}
+
+	g_dev.subdev_nodes_created = true;
+
+create_fail:
+	mutex_unlock(&g_dev.dev_lock);
+	return rc;
+}
+
+static int __init cam_req_mgr_init(void)
+{
+	return platform_driver_register(&cam_req_mgr_driver);
+}
+
+static int __init cam_req_mgr_late_init(void)
+{
+	return cam_dev_mgr_create_subdev_nodes();
+}
+
+static void __exit cam_req_mgr_exit(void)
+{
+	platform_driver_unregister(&cam_req_mgr_driver);
+}
+
+module_init(cam_req_mgr_init);
+late_initcall(cam_req_mgr_late_init);
+module_exit(cam_req_mgr_exit);
+MODULE_DESCRIPTION("Camera Request Manager");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
new file mode 100644
index 0000000..f80c404
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_dev.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_REQ_MGR_DEV_H_
+#define _CAM_REQ_MGR_DEV_H_
+
+/**
+ * struct cam_req_mgr_device - a camera request manager device
+ *
+ * @video: pointer to struct video device.
+ * @v4l2_dev: pointer to struct v4l2 device.
+ * @subdev_nodes_created: flag to check the created state.
+ * @count: number of subdevices registered.
+ * @dev_lock: lock for the subdevice count.
+ * @state: state of the root device.
+ * @open_cnt: open count of subdev
+ * @cam_lock: per file handle lock
+ * @cam_eventq: event queue
+ * @cam_eventq_lock: lock for event queue
+ */
+struct cam_req_mgr_device {
+	struct video_device *video;
+	struct v4l2_device *v4l2_dev;
+	bool subdev_nodes_created;
+	int count;
+	struct mutex dev_lock;
+	bool state;
+	int32_t open_cnt;
+	struct mutex cam_lock;
+	struct v4l2_fh  *cam_eventq;
+	spinlock_t cam_eventq_lock;
+};
+
+#define CAM_REQ_MGR_GET_PAYLOAD_PTR(ev, type)        \
+	(type *)((char *)ev.u.data)
+
+int cam_req_mgr_notify_message(struct cam_req_mgr_message *msg,
+	uint32_t id,
+	uint32_t type);
+
+#endif /* _CAM_REQ_MGR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
new file mode 100644
index 0000000..b760713
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_interface.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_REQ_MGR_INTERFACE_H
+#define _CAM_REQ_MGR_INTERFACE_H
+
+#include <linux/types.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_core_defs.h"
+#include "cam_req_mgr_util.h"
+
+struct cam_req_mgr_trigger_notify;
+struct cam_req_mgr_error_notify;
+struct cam_req_mgr_add_request;
+struct cam_req_mgr_device_info;
+struct cam_req_mgr_core_dev_link_setup;
+struct cam_req_mgr_apply_request;
+struct cam_req_mgr_flush_request;
+struct cam_req_mgr_link_evt_data;
+
+#define SKIP_NEXT_FRAME 0x100
+
+/* Request Manager -- camera device driver interface */
+/**
+ * @brief: camera kernel drivers to cam req mgr communication
+ *
+ * @cam_req_mgr_notify_trigger: for devices which generate triggers to inform CRM
+ * @cam_req_mgr_notify_err    : devices use this to report different errors
+ * @cam_req_mgr_add_req       : to inform CRM about a new request received from
+ *                              userspace
+ */
+typedef int (*cam_req_mgr_notify_trigger)(
+	struct cam_req_mgr_trigger_notify *);
+typedef int (*cam_req_mgr_notify_err)(struct cam_req_mgr_error_notify *);
+typedef int (*cam_req_mgr_add_req)(struct cam_req_mgr_add_request *);
+
+/**
+ * @brief: cam req mgr to camera device drivers
+ *
+ * @cam_req_mgr_get_dev_info: to fetch details about device linked
+ * @cam_req_mgr_link_setup  : to establish link with device for a session
+ * @cam_req_mgr_notify_err  : to broadcast an error that happened on a link
+ *                            for a request id
+ * @cam_req_mgr_apply_req   : CRM asks the device to apply a certain request id
+ * @cam_req_mgr_flush_req   : flush or cancel a request
+ * @cam_req_mgr_process_evt : to handle generic link events
+ */
+typedef int (*cam_req_mgr_get_dev_info) (struct cam_req_mgr_device_info *);
+typedef int (*cam_req_mgr_link_setup)(
+	struct cam_req_mgr_core_dev_link_setup *);
+typedef int (*cam_req_mgr_apply_req)(struct cam_req_mgr_apply_request *);
+typedef int (*cam_req_mgr_flush_req)(struct cam_req_mgr_flush_request *);
+typedef int (*cam_req_mgr_process_evt)(struct cam_req_mgr_link_evt_data *);
+
+/**
+ * @brief          : cam_req_mgr_crm_cb - func table
+ *
+ * @notify_trigger : payload for trigger indication event
+ * @notify_err     : payload for different error occurred at device
+ * @add_req        : payload to inform which device and what request is received
+ */
+struct cam_req_mgr_crm_cb {
+	cam_req_mgr_notify_trigger  notify_trigger;
+	cam_req_mgr_notify_err      notify_err;
+	cam_req_mgr_add_req         add_req;
+};
+
+/**
+ * @brief        : cam_req_mgr_kmd_ops - func table
+ *
+ * @get_dev_info : payload to fetch device details
+ * @link_setup   : payload to establish link with device
+ * @apply_req    : payload to apply request id on a device linked
+ * @flush_req    : payload to flush request
+ * @process_evt  : payload to generic event
+ */
+struct cam_req_mgr_kmd_ops {
+	cam_req_mgr_get_dev_info     get_dev_info;
+	cam_req_mgr_link_setup       link_setup;
+	cam_req_mgr_apply_req        apply_req;
+	cam_req_mgr_flush_req        flush_req;
+	cam_req_mgr_process_evt      process_evt;
+};
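+
+/*
+ * Illustrative sketch with hypothetical my_dev_* handlers: a KMD driver
+ * exposes its hooks through cam_req_mgr_kmd_ops and hands the table to CRM
+ * (via the ops pointer of its device handle); CRM then drives the device
+ * exclusively through these callbacks.
+ *
+ *	static struct cam_req_mgr_kmd_ops my_dev_crm_ops = {
+ *		.get_dev_info = my_dev_get_dev_info,
+ *		.link_setup   = my_dev_link_setup,
+ *		.apply_req    = my_dev_apply_req,
+ *		.flush_req    = my_dev_flush_req,
+ *		.process_evt  = my_dev_process_evt,
+ *	};
+ */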
+
+/**
+ * enum cam_pipeline_delay
+ * @brief     : enumerator for different pipeline delays in camera
+ *
+ * @DELAY_0   : device applies settings on the same frame
+ * @DELAY_1   : device applies settings after 1 frame
+ * @DELAY_2   : device applies settings after 2 frames
+ * @DELAY_MAX : maximum supported pipeline delay
+ */
+enum cam_pipeline_delay {
+	CAM_PIPELINE_DELAY_0,
+	CAM_PIPELINE_DELAY_1,
+	CAM_PIPELINE_DELAY_2,
+	CAM_PIPELINE_DELAY_MAX,
+};
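+
+/*
+ * Worked example: for a sensor with CAM_PIPELINE_DELAY_2, settings applied
+ * at frame N take effect at frame N + 2, so CRM must program request N's
+ * settings two frames before the frame they are meant for.
+ */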
+
+/**
+ * @CAM_TRIGGER_POINT_SOF   : Trigger point for SOF
+ * @CAM_TRIGGER_POINT_EOF   : Trigger point for EOF
+ */
+#define CAM_TRIGGER_POINT_SOF     (1 << 0)
+#define CAM_TRIGGER_POINT_EOF     (1 << 1)
+
+/**
+ * enum cam_req_status
+ * @brief   : enumerator for request status
+ *
+ * @SUCCESS : device processed settings successfully
+ * @FAILED  : device processed settings failed
+ * @MAX     : invalid status value
+ */
+enum cam_req_status {
+	CAM_REQ_STATUS_SUCCESS,
+	CAM_REQ_STATUS_FAILED,
+	CAM_REQ_STATUS_MAX,
+};
+
+/**
+ * enum cam_req_mgr_device_error
+ * @brief      : enumerator for different errors occurred at device
+ *
+ * @NOT_FOUND  : settings requested by the request manager are not found
+ * @BUBBLE     : device hit a timing issue but is able to recover
+ * @FATAL      : device is in a bad state and cannot recover from the error
+ * @PAGE_FAULT : page fault while accessing memory
+ * @OVERFLOW   : bus overflow for IFE/VFE
+ * @TIMEOUT    : timeout from CCI or bus
+ * @MAX        : invalid error value
+ */
+enum cam_req_mgr_device_error {
+	CRM_KMD_ERR_NOT_FOUND,
+	CRM_KMD_ERR_BUBBLE,
+	CRM_KMD_ERR_FATAL,
+	CRM_KMD_ERR_PAGE_FAULT,
+	CRM_KMD_ERR_OVERFLOW,
+	CRM_KMD_ERR_TIMEOUT,
+	CRM_KMD_ERR_MAX,
+};
+
+/**
+ * enum cam_req_mgr_device_id
+ * @brief       : enumerator for different devices in subsystem
+ *
+ * @CAM_REQ_MGR : request manager itself
+ * @SENSOR      : sensor device
+ * @FLASH       : LED flash or dual LED device
+ * @ACTUATOR    : lens mover
+ * @IFE         : Image processing device
+ * @EXTERNAL_1  : third party device
+ * @EXTERNAL_2  : third party device
+ * @EXTERNAL_3  : third party device
+ * @MAX         : invalid device id
+ */
+enum cam_req_mgr_device_id {
+	CAM_REQ_MGR_DEVICE,
+	CAM_REQ_MGR_DEVICE_SENSOR,
+	CAM_REQ_MGR_DEVICE_FLASH,
+	CAM_REQ_MGR_DEVICE_ACTUATOR,
+	CAM_REQ_MGR_DEVICE_IFE,
+	CAM_REQ_MGR_DEVICE_EXTERNAL_1,
+	CAM_REQ_MGR_DEVICE_EXTERNAL_2,
+	CAM_REQ_MGR_DEVICE_EXTERNAL_3,
+	CAM_REQ_MGR_DEVICE_ID_MAX,
+};
+
+/* Camera device driver to Req Mgr device interface */
+
+/**
+ * enum cam_req_mgr_link_evt_type
+ * @CAM_REQ_MGR_LINK_EVT_ERR        : error on the link from any of the
+ *                                    connected devices
+ * @CAM_REQ_MGR_LINK_EVT_PAUSE      : to pause the link
+ * @CAM_REQ_MGR_LINK_EVT_RESUME     : resumes the link which was paused
+ * @CAM_REQ_MGR_LINK_EVT_SOF_FREEZE : request manager has detected an SOF freeze
+ * @CAM_REQ_MGR_LINK_EVT_MAX        : invalid event type
+ */
+enum cam_req_mgr_link_evt_type {
+	CAM_REQ_MGR_LINK_EVT_ERR,
+	CAM_REQ_MGR_LINK_EVT_PAUSE,
+	CAM_REQ_MGR_LINK_EVT_RESUME,
+	CAM_REQ_MGR_LINK_EVT_SOF_FREEZE,
+	CAM_REQ_MGR_LINK_EVT_MAX,
+};
+
+/**
+ * struct cam_req_mgr_trigger_notify
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @frame_id : frame id for internal tracking
+ * @trigger  : trigger point of this notification, CRM will send apply
+ * only to the devices which subscribe to this point.
+ */
+struct cam_req_mgr_trigger_notify {
+	int32_t  link_hdl;
+	int32_t  dev_hdl;
+	int64_t  frame_id;
+	uint32_t trigger;
+};
+
+/**
+ * struct cam_req_mgr_error_notify
+ * @link_hdl : link identifier
+ * @dev_hdl  : device handle which has sent this req id
+ * @req_id   : req id which hit error
+ * @error    : what error device hit while processing this req
+ */
+struct cam_req_mgr_error_notify {
+	int32_t  link_hdl;
+	int32_t  dev_hdl;
+	uint64_t req_id;
+	enum cam_req_mgr_device_error error;
+};
+
+/**
+ * struct cam_req_mgr_add_request
+ * @link_hdl             : link identifier
+ * @dev_hdl              : device handle which has sent this req id
+ * @req_id               : req id which device is ready to process
+ * @skip_before_applying : before applying, req mgr introduces a bubble
+ *                         by not sending the request to some device(s),
+ *                         e.g. IFE and Flash
+ */
+struct cam_req_mgr_add_request {
+	int32_t  link_hdl;
+	int32_t  dev_hdl;
+	uint64_t req_id;
+	uint32_t skip_before_applying;
+};
+
+
+/* CRM to KMD devices */
+/**
+ * struct cam_req_mgr_device_info
+ * @dev_hdl : input param : device handle for reference
+ * @name    : name of the device
+ * @dev_id  : device id info
+ * @p_delay : pipeline delay between when settings are applied and when they
+ *            take effect
+ * @trigger : trigger point for the client
+ *
+ */
+struct cam_req_mgr_device_info {
+	int32_t                     dev_hdl;
+	char                        name[256];
+	enum cam_req_mgr_device_id  dev_id;
+	enum cam_pipeline_delay     p_delay;
+	uint32_t                    trigger;
+};
+
+/**
+ * struct cam_req_mgr_core_dev_link_setup
+ * @link_enable     : set for link, clear for unlink
+ * @link_hdl        : link identifier
+ * @dev_hdl         : device handle for reference
+ * @max_delay       : max pipeline delay on this link
+ * @crm_cb          : callback funcs to communicate with req mgr
+ * @subscribe_event : the mask of trigger points this link subscribes
+ *
+ */
+struct cam_req_mgr_core_dev_link_setup {
+	int32_t                    link_enable;
+	int32_t                    link_hdl;
+	int32_t                    dev_hdl;
+	enum cam_pipeline_delay    max_delay;
+	struct cam_req_mgr_crm_cb *crm_cb;
+	uint32_t                   subscribe_event;
+};
+
+/**
+ * struct cam_req_mgr_apply_request
+ * @link_hdl         : link identifier
+ * @dev_hdl          : device handle for cross check
+ * @request_id       : request id settings to apply
+ * @report_if_bubble : report to crm if failure in applying
+ * @trigger_point    : the trigger point of this apply
+ *
+ */
+struct cam_req_mgr_apply_request {
+	int32_t    link_hdl;
+	int32_t    dev_hdl;
+	uint64_t   request_id;
+	int32_t    report_if_bubble;
+	uint32_t   trigger_point;
+};
+
+/**
+ * struct cam_req_mgr_flush_request
+ * @link_hdl    : link identifier
+ * @dev_hdl     : device handle for cross check
+ * @type        : cancel request type, flush all or a single request
+ * @req_id      : request id to cancel
+ *
+ */
+struct cam_req_mgr_flush_request {
+	int32_t     link_hdl;
+	int32_t     dev_hdl;
+	uint32_t    type;
+	uint64_t    req_id;
+};
+
+/**
+ * struct cam_req_mgr_link_evt_data
+ * @link_hdl : link handle
+ * @dev_hdl  : device handle reporting the event
+ * @req_id   : request id
+ * @evt_type : link event type
+ * @u.error  : error type, valid when @evt_type is CAM_REQ_MGR_LINK_EVT_ERR
+ */
+struct cam_req_mgr_link_evt_data {
+	int32_t  link_hdl;
+	int32_t  dev_hdl;
+	uint64_t req_id;
+
+	enum cam_req_mgr_link_evt_type evt_type;
+	union {
+		enum cam_req_mgr_device_error error;
+	} u;
+};
+
+/**
+ * struct cam_req_mgr_send_request
+ * @link_hdl   : link identifier
+ * @in_q       : input request queue the request belongs to
+ *
+ */
+struct cam_req_mgr_send_request {
+	int32_t    link_hdl;
+	struct cam_req_mgr_req_queue *in_q;
+};
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
new file mode 100644
index 0000000..a497216
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_req_mgr_timer.h"
+#include "cam_debug_util.h"
+
+void crm_timer_reset(struct cam_req_mgr_timer *crm_timer)
+{
+	if (!crm_timer)
+		return;
+	CAM_DBG(CAM_CRM, "Starting timer to fire in %d ms. (jiffies=%lu)\n",
+		crm_timer->expires, jiffies);
+	mod_timer(&crm_timer->sys_timer,
+		(jiffies + msecs_to_jiffies(crm_timer->expires)));
+}
+
+void crm_timer_callback(struct timer_list *timer_data)
+{
+	struct cam_req_mgr_timer *timer =
+		container_of(timer_data, struct cam_req_mgr_timer, sys_timer);
+	if (!timer) {
+		CAM_ERR(CAM_CRM, "NULL timer");
+		return;
+	}
+	CAM_DBG(CAM_CRM, "timer %pK parent %pK", timer, timer->parent);
+	crm_timer_reset(timer);
+}
+
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires)
+{
+	CAM_DBG(CAM_CRM, "new time %d", expires);
+	if (crm_timer) {
+		crm_timer->expires = expires;
+		crm_timer_reset(crm_timer);
+	}
+}
+
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(struct timer_list *))
+{
+	int                       ret = 0;
+	struct cam_req_mgr_timer *crm_timer = NULL;
+
+	CAM_DBG(CAM_CRM, "init timer %d %pK", expires, *timer);
+	if (*timer == NULL) {
+		if (g_cam_req_mgr_timer_cachep) {
+			crm_timer = kmem_cache_alloc(g_cam_req_mgr_timer_cachep,
+					__GFP_ZERO | GFP_KERNEL);
+			if (!crm_timer) {
+				ret = -ENOMEM;
+				goto end;
+			}
+		} else {
+			ret = -ENOMEM;
+			goto end;
+		}
+
+		if (timer_cb != NULL)
+			crm_timer->timer_cb = timer_cb;
+		else
+			crm_timer->timer_cb = crm_timer_callback;
+
+		crm_timer->expires = expires;
+		crm_timer->parent = parent;
+		timer_setup(&crm_timer->sys_timer,
+			crm_timer->timer_cb, 0);
+		crm_timer_reset(crm_timer);
+		*timer = crm_timer;
+	} else {
+		CAM_WARN(CAM_CRM, "Timer already exists!!");
+		ret = -EINVAL;
+	}
+end:
+	return ret;
+}
+
+void crm_timer_exit(struct cam_req_mgr_timer **crm_timer)
+{
+	CAM_DBG(CAM_CRM, "destroy timer %pK @ %pK", *crm_timer, crm_timer);
+	if (*crm_timer) {
+		del_timer_sync(&(*crm_timer)->sys_timer);
+		if (g_cam_req_mgr_timer_cachep)
+			kmem_cache_free(g_cam_req_mgr_timer_cachep, *crm_timer);
+		*crm_timer = NULL;
+	}
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
new file mode 100644
index 0000000..0f9f2e0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_timer.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_REQ_MGR_TIMER_H_
+#define _CAM_REQ_MGR_TIMER_H_
+
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "cam_req_mgr_core_defs.h"
+
+/** struct cam_req_mgr_timer
+ * @expires   : timeout value for timer
+ * @sys_timer : system timer variable
+ * @parent    : priv data - link pointer
+ * @timer_cb  : callback func which will be called when timeout expires
+ */
+struct cam_req_mgr_timer {
+	int32_t            expires;
+	struct timer_list  sys_timer;
+	void               *parent;
+	void               (*timer_cb)(struct timer_list *timer_data);
+};
+
+/**
+ * crm_timer_modify()
+ * @brief     : allows the user to modify the expiry time.
+ * @crm_timer : timer whose expiry value will be updated
+ * @expires   : new timeout value in ms
+ */
+void crm_timer_modify(struct cam_req_mgr_timer *crm_timer,
+	int32_t expires);
+
+/**
+ * crm_timer_reset()
+ * @brief : re-arms the timer to fire after its current expires value.
+ * @timer : timer which will be re-armed
+ */
+void crm_timer_reset(struct cam_req_mgr_timer *timer);
+
+/**
+ * crm_timer_init()
+ * @brief    : create a new general purpose timer.
+ *             The timer utility takes care of allocating and freeing memory.
+ * @timer    : double pointer to the newly allocated timer
+ * @expires  : timeout value in ms after which the callback fires
+ * @parent   : void pointer which the caller can use for bookkeeping
+ * @timer_cb : caller can choose to use its own callback function when
+ *             the timer fires. If no callback is set, the timer utility
+ *             uses its default.
+ */
+int crm_timer_init(struct cam_req_mgr_timer **timer,
+	int32_t expires, void *parent, void (*timer_cb)(struct timer_list *));
+
+/**
+ * crm_timer_exit()
+ * @brief : destroys the timer allocated.
+ * @timer : timer pointer which will be freed
+ */
+void crm_timer_exit(struct cam_req_mgr_timer **timer);
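+
+/*
+ * Illustrative lifecycle sketch, assuming a 500 ms watchdog owned by a
+ * hypothetical link object; passing NULL selects the default callback.
+ * crm_timer_reset() re-arms the timer on activity and crm_timer_exit()
+ * stops it and frees the memory.
+ *
+ *	struct cam_req_mgr_timer *watchdog = NULL;
+ *	int rc;
+ *
+ *	rc = crm_timer_init(&watchdog, 500, link, NULL);
+ *	...
+ *	crm_timer_reset(watchdog);
+ *	crm_timer_exit(&watchdog);
+ */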
+
+extern struct kmem_cache *g_cam_req_mgr_timer_cachep;
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
new file mode 100644
index 0000000..d28ab81
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "CAM-REQ-MGR_UTIL %s:%d " fmt, __func__, __LINE__
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util.h"
+#include "cam_debug_util.h"
+
+static struct cam_req_mgr_util_hdl_tbl *hdl_tbl;
+static DEFINE_SPINLOCK(hdl_tbl_lock);
+
+int cam_req_mgr_util_init(void)
+{
+	int rc = 0;
+	int bitmap_size;
+	static struct cam_req_mgr_util_hdl_tbl *hdl_tbl_local;
+
+	if (hdl_tbl) {
+		rc = -EINVAL;
+		CAM_ERR(CAM_CRM, "Hdl_tbl is already present");
+		goto hdl_tbl_check_failed;
+	}
+
+	hdl_tbl_local = kzalloc(sizeof(*hdl_tbl), GFP_KERNEL);
+	if (!hdl_tbl_local) {
+		rc = -ENOMEM;
+		goto hdl_tbl_alloc_failed;
+	}
+	spin_lock_bh(&hdl_tbl_lock);
+	if (hdl_tbl) {
+		spin_unlock_bh(&hdl_tbl_lock);
+		rc = -EEXIST;
+		kfree(hdl_tbl_local);
+		goto hdl_tbl_check_failed;
+	}
+	hdl_tbl = hdl_tbl_local;
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	bitmap_size = BITS_TO_LONGS(CAM_REQ_MGR_MAX_HANDLES) * sizeof(long);
+	hdl_tbl->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!hdl_tbl->bitmap) {
+		rc = -ENOMEM;
+		goto bitmap_alloc_fail;
+	}
+	hdl_tbl->bits = bitmap_size * BITS_PER_BYTE;
+
+	return rc;
+
+bitmap_alloc_fail:
+	kfree(hdl_tbl);
+	hdl_tbl = NULL;
+hdl_tbl_alloc_failed:
+hdl_tbl_check_failed:
+	return rc;
+}
+
+int cam_req_mgr_util_deinit(void)
+{
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return -EINVAL;
+	}
+
+	kfree(hdl_tbl->bitmap);
+	hdl_tbl->bitmap = NULL;
+	kfree(hdl_tbl);
+	hdl_tbl = NULL;
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	return 0;
+}
+
+int cam_req_mgr_util_free_hdls(void)
+{
+	int i = 0;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < CAM_REQ_MGR_MAX_HANDLES; i++) {
+		if (hdl_tbl->hdl[i].state == HDL_ACTIVE) {
+			CAM_ERR(CAM_CRM, "Dev handle = %x session_handle = %x",
+				hdl_tbl->hdl[i].hdl_value,
+				hdl_tbl->hdl[i].session_hdl);
+			hdl_tbl->hdl[i].state = HDL_FREE;
+			clear_bit(i, hdl_tbl->bitmap);
+		}
+	}
+	bitmap_zero(hdl_tbl->bitmap, CAM_REQ_MGR_MAX_HANDLES);
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	return 0;
+}
+
+static int32_t cam_get_free_handle_index(void)
+{
+	int idx;
+
+	idx = find_first_zero_bit(hdl_tbl->bitmap, hdl_tbl->bits);
+
+	if (idx >= CAM_REQ_MGR_MAX_HANDLES || idx < 0)
+		return -ENOSR;
+
+	set_bit(idx, hdl_tbl->bitmap);
+
+	return idx;
+}
+
+int32_t cam_create_session_hdl(void *priv)
+{
+	int idx;
+	int rand = 0;
+	int32_t handle = 0;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return -EINVAL;
+	}
+
+	idx = cam_get_free_handle_index();
+	if (idx < 0) {
+		CAM_ERR(CAM_CRM, "Unable to create session handle");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return idx;
+	}
+
+	get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES);
+	handle = GET_DEV_HANDLE(rand, HDL_TYPE_SESSION, idx);
+	hdl_tbl->hdl[idx].session_hdl = handle;
+	hdl_tbl->hdl[idx].hdl_value = handle;
+	hdl_tbl->hdl[idx].type = HDL_TYPE_SESSION;
+	hdl_tbl->hdl[idx].state = HDL_ACTIVE;
+	hdl_tbl->hdl[idx].priv = priv;
+	hdl_tbl->hdl[idx].ops = NULL;
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	return handle;
+}
+
+int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data)
+{
+	int idx;
+	int rand = 0;
+	int32_t handle;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return -EINVAL;
+	}
+
+	idx = cam_get_free_handle_index();
+	if (idx < 0) {
+		CAM_ERR(CAM_CRM, "Unable to create device handle");
+		spin_unlock_bh(&hdl_tbl_lock);
+		return idx;
+	}
+
+	get_random_bytes(&rand, CAM_REQ_MGR_RND1_BYTES);
+	handle = GET_DEV_HANDLE(rand, HDL_TYPE_DEV, idx);
+	hdl_tbl->hdl[idx].session_hdl = hdl_data->session_hdl;
+	hdl_tbl->hdl[idx].hdl_value = handle;
+	hdl_tbl->hdl[idx].type = HDL_TYPE_DEV;
+	hdl_tbl->hdl[idx].state = HDL_ACTIVE;
+	hdl_tbl->hdl[idx].priv = hdl_data->priv;
+	hdl_tbl->hdl[idx].ops = hdl_data->ops;
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	pr_debug("%s: handle = %x", __func__, handle);
+	return handle;
+}
+
+void *cam_get_device_priv(int32_t dev_hdl)
+{
+	int idx;
+	int type;
+	void *priv;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Hdl tbl is NULL");
+		goto device_priv_fail;
+	}
+
+	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
+	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid idx");
+		goto device_priv_fail;
+	}
+
+	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid state");
+		goto device_priv_fail;
+	}
+
+	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
+	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid type");
+		goto device_priv_fail;
+	}
+
+	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
+		CAM_ERR_RATE_LIMIT(CAM_CRM, "Invalid hdl");
+		goto device_priv_fail;
+	}
+
+	priv = hdl_tbl->hdl[idx].priv;
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	return priv;
+
+device_priv_fail:
+	spin_unlock_bh(&hdl_tbl_lock);
+	return NULL;
+}
+
+void *cam_get_device_ops(int32_t dev_hdl)
+{
+	int idx;
+	int type;
+	void *ops;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		goto device_ops_fail;
+	}
+
+	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
+	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
+		CAM_ERR(CAM_CRM, "Invalid idx");
+		goto device_ops_fail;
+	}
+
+	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
+		CAM_ERR(CAM_CRM, "Invalid state");
+		goto device_ops_fail;
+	}
+
+	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
+	if (HDL_TYPE_DEV != type && HDL_TYPE_SESSION != type) {
+		CAM_ERR(CAM_CRM, "Invalid type");
+		goto device_ops_fail;
+	}
+
+	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
+		CAM_ERR(CAM_CRM, "Invalid hdl");
+		goto device_ops_fail;
+	}
+
+	ops = hdl_tbl->hdl[idx].ops;
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	return ops;
+
+device_ops_fail:
+	spin_unlock_bh(&hdl_tbl_lock);
+	return NULL;
+}
+
+static int cam_destroy_hdl(int32_t dev_hdl, int dev_hdl_type)
+{
+	int idx;
+	int type;
+
+	spin_lock_bh(&hdl_tbl_lock);
+	if (!hdl_tbl) {
+		CAM_ERR(CAM_CRM, "Hdl tbl is NULL");
+		goto destroy_hdl_fail;
+	}
+
+	idx = CAM_REQ_MGR_GET_HDL_IDX(dev_hdl);
+	if (idx >= CAM_REQ_MGR_MAX_HANDLES) {
+		CAM_ERR(CAM_CRM, "Invalid idx %d", idx);
+		goto destroy_hdl_fail;
+	}
+
+	if (hdl_tbl->hdl[idx].state != HDL_ACTIVE) {
+		CAM_ERR(CAM_CRM, "Invalid state");
+		goto destroy_hdl_fail;
+	}
+
+	type = CAM_REQ_MGR_GET_HDL_TYPE(dev_hdl);
+	if (type != dev_hdl_type) {
+		CAM_ERR(CAM_CRM, "Invalid type %d, %d", type, dev_hdl_type);
+		goto destroy_hdl_fail;
+	}
+
+	if (hdl_tbl->hdl[idx].hdl_value != dev_hdl) {
+		CAM_ERR(CAM_CRM, "Invalid hdl");
+		goto destroy_hdl_fail;
+	}
+
+	hdl_tbl->hdl[idx].state = HDL_FREE;
+	hdl_tbl->hdl[idx].ops   = NULL;
+	hdl_tbl->hdl[idx].priv  = NULL;
+	clear_bit(idx, hdl_tbl->bitmap);
+	spin_unlock_bh(&hdl_tbl_lock);
+
+	return 0;
+
+destroy_hdl_fail:
+	spin_unlock_bh(&hdl_tbl_lock);
+	return -EINVAL;
+}
+
+int cam_destroy_device_hdl(int32_t dev_hdl)
+{
+	return cam_destroy_hdl(dev_hdl, HDL_TYPE_DEV);
+}
+
+int cam_destroy_session_hdl(int32_t dev_hdl)
+{
+	return cam_destroy_hdl(dev_hdl, HDL_TYPE_SESSION);
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.h
new file mode 100644
index 0000000..aa0a83e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_REQ_MGR_UTIL_API_H_
+#define _CAM_REQ_MGR_UTIL_API_H_
+
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util_priv.h"
+
+/**
+ * state of a handle(session/device)
+ * @HDL_FREE: free handle
+ * @HDL_ACTIVE: active handles
+ */
+enum hdl_state {
+	HDL_FREE,
+	HDL_ACTIVE
+};
+
+/**
+ * handle type
+ * @HDL_TYPE_DEV: for device and link
+ * @HDL_TYPE_SESSION: for session
+ */
+enum hdl_type {
+	HDL_TYPE_DEV = 1,
+	HDL_TYPE_SESSION
+};
+
+/**
+ * struct handle
+ * @session_hdl: session handle
+ * @hdl_value: Allocated handle
+ * @type: session/device handle
+ * @state: free/used
+ * @ops: ops structure
+ * @priv: private data of a handle
+ */
+struct handle {
+	int32_t session_hdl;
+	uint32_t hdl_value;
+	enum hdl_type type;
+	enum hdl_state state;
+	void *ops;
+	void *priv;
+};
+
+/**
+ * struct cam_req_mgr_util_hdl_tbl
+ * @hdl: row of handles
+ * @bitmap: bit map to get free hdl row idx
+ * @bits: size of bit map in bits
+ */
+struct cam_req_mgr_util_hdl_tbl {
+	struct handle hdl[CAM_REQ_MGR_MAX_HANDLES];
+	void *bitmap;
+	size_t bits;
+};
+
+/**
+ * struct cam_create_dev_hdl - input for creating a device handle
+ * @session_hdl: session_hdl info
+ * @v4l2_sub_dev_flag: flag to create v4l2 sub device
+ * @media_entity_flag: flag for media entity
+ * @reserved: reserved field
+ * @ops: ops pointer for a device handle
+ * @priv: private data for a device handle
+ */
+struct cam_create_dev_hdl {
+	int32_t session_hdl;
+	int32_t v4l2_sub_dev_flag;
+	int32_t media_entity_flag;
+	int32_t reserved;
+	void *ops;
+	void *priv;
+};
+
+/**
+ * cam_create_session_hdl() - create a session handle
+ * @priv: private data for a session handle
+ *
+ * cam_req_mgr core calls this function to allocate and
+ * return a unique session handle.
+ */
+int32_t cam_create_session_hdl(void *priv);
+
+/**
+ * cam_create_device_hdl() - create a device handle
+ * @hdl_data: session hdl, flags, ops and priv data as input
+ *
+ * cam_req_mgr_core calls this function to create
+ * link handles; KMD drivers call this function to create
+ * device handles. Returns a unique device handle.
+ */
+int32_t cam_create_device_hdl(struct cam_create_dev_hdl *hdl_data);
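+
+/*
+ * Illustrative sketch with hypothetical driver objects: a KMD driver
+ * creates its device handle against an existing session and stashes its
+ * ops table and private context, which CRM can later retrieve through
+ * cam_get_device_ops() and cam_get_device_priv().
+ *
+ *	struct cam_create_dev_hdl hdl_data = {};
+ *	int32_t dev_hdl;
+ *
+ *	hdl_data.session_hdl = session_hdl;
+ *	hdl_data.ops = &my_dev_crm_ops;
+ *	hdl_data.priv = my_dev_ctx;
+ *	dev_hdl = cam_create_device_hdl(&hdl_data);
+ */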
+
+/**
+ * cam_get_device_priv() - get private data of a handle
+ * @dev_hdl: handle for a session/link/device
+ *
+ * cam_req_mgr_core and KMD drivers use this function to
+ * get private data of a handle. Returns a private data
+ * structure pointer.
+ */
+void *cam_get_device_priv(int32_t dev_hdl);
+
+/**
+ * cam_get_device_ops() - get ops of a handle
+ * @dev_hdl: handle for a session/link/device
+ *
+ * cam_req_mgr_core and KMD drivers use this function to
+ * get ops of a handle. Returns a pointer to ops.
+ */
+void *cam_get_device_ops(int32_t dev_hdl);
+
+/**
+ * cam_destroy_device_hdl() - destroy device handle
+ * @dev_hdl: handle for a link/device.
+ *
+ * Returns success/failure
+ */
+int32_t cam_destroy_device_hdl(int32_t dev_hdl);
+
+/**
+ * cam_destroy_session_hdl() - destroy device handle
+ * @dev_hdl: handle for a session
+ *
+ * Returns success/failure
+ */
+int32_t cam_destroy_session_hdl(int32_t dev_hdl);
+
+
+/* Internal functions */
+/**
+ * cam_req_mgr_util_init() - init function of cam_req_mgr_util
+ *
+ * This is called as part of probe function to initialize
+ * handle table, bitmap, locks
+ */
+int cam_req_mgr_util_init(void);
+
+/**
+ * cam_req_mgr_util_deinit() - deinit function of cam_req_mgr_util
+ *
+ * This function is called in case of probe failure
+ */
+int32_t cam_req_mgr_util_deinit(void);
+
+/**
+ * cam_req_mgr_util_free_hdls() - free handles in case of crash
+ *
+ * Called from cam_req_mgr_dev release function to make sure
+ * all data structures are cleaned to avoid leaks
+ *
+ * cam_req_mgr core can call this function at the end of a
+ * camera session to make sure all stale entries are printed
+ * and cleaned
+ */
+int32_t cam_req_mgr_util_free_hdls(void);
+
+#endif /* _CAM_REQ_MGR_UTIL_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util_priv.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util_priv.h
new file mode 100644
index 0000000..6d719be
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_util_priv.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_REQ_MGR_UTIL_PRIV_H_
+#define _CAM_REQ_MGR_UTIL_PRIV_H_
+
+/**
+ * handle format:
+ * @bits (0-7): handle index
+ * @bits (8-11): handle type
+ * @bits (12-15): reserved
+ * @bits (16-23): random bits
+ * @bits (24-31): zeros
+ */
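+
+/*
+ * Worked example of the layout above: for index 0x05, type HDL_TYPE_DEV (1)
+ * and random byte 0xAB, GET_DEV_HANDLE() yields
+ * (0xAB << 16) | (1 << 8) | 0x05 = 0x00ab0105; CAM_REQ_MGR_GET_HDL_IDX()
+ * then recovers 0x05 and CAM_REQ_MGR_GET_HDL_TYPE() recovers 1.
+ */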
+
+#define CAM_REQ_MGR_HDL_SIZE            32
+#define CAM_REQ_MGR_RND1_SIZE           8
+#define CAM_REQ_MGR_RVD_SIZE            4
+#define CAM_REQ_MGR_HDL_TYPE_SIZE       4
+#define CAM_REQ_MGR_HDL_IDX_SIZE        8
+
+#define CAM_REQ_MGR_RND1_POS            24
+#define CAM_REQ_MGR_RVD_POS             16
+#define CAM_REQ_MGR_HDL_TYPE_POS        12
+#define CAM_REQ_MGR_HDL_IDX_POS         8
+
+#define CAM_REQ_MGR_RND1_BYTES          1
+
+#define CAM_REQ_MGR_HDL_IDX_MASK       ((1 << CAM_REQ_MGR_HDL_IDX_SIZE) - 1)
+#define CAM_REQ_MGR_HDL_TYPE_MASK      ((1 << CAM_REQ_MGR_HDL_TYPE_SIZE) - 1)
+
+#define GET_DEV_HANDLE(rnd1, type, idx) \
+	((rnd1 << (CAM_REQ_MGR_RND1_POS - CAM_REQ_MGR_RND1_SIZE)) | \
+	(0x0  << (CAM_REQ_MGR_RVD_POS - CAM_REQ_MGR_RVD_SIZE)) | \
+	(type << (CAM_REQ_MGR_HDL_TYPE_POS - CAM_REQ_MGR_HDL_TYPE_SIZE)) | \
+	(idx << (CAM_REQ_MGR_HDL_IDX_POS - CAM_REQ_MGR_HDL_IDX_SIZE)))
+
+#define CAM_REQ_MGR_GET_HDL_IDX(hdl) (hdl & CAM_REQ_MGR_HDL_IDX_MASK)
+#define CAM_REQ_MGR_GET_HDL_TYPE(hdl) \
+	((hdl >> CAM_REQ_MGR_HDL_IDX_POS) & CAM_REQ_MGR_HDL_TYPE_MASK)
+
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
new file mode 100644
index 0000000..2ea33e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_req_mgr_workq.h"
+#include "cam_debug_util.h"
+
+#define WORKQ_ACQUIRE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_lock_irqsave(&(workq)->lock_bh, (flags)); \
+	else \
+		spin_lock_bh(&(workq)->lock_bh); \
+}
+
+#define WORKQ_RELEASE_LOCK(workq, flags) {\
+	if ((workq)->in_irq) \
+		spin_unlock_irqrestore(&(workq)->lock_bh, (flags)); \
+	else	\
+		spin_unlock_bh(&(workq)->lock_bh); \
+}
+
+struct crm_workq_task *cam_req_mgr_workq_get_task(
+	struct cam_req_mgr_core_workq *workq)
+{
+	struct crm_workq_task *task = NULL;
+	unsigned long flags = 0;
+
+	if (!workq)
+		return NULL;
+
+	WORKQ_ACQUIRE_LOCK(workq, flags);
+	if (list_empty(&workq->task.empty_head))
+		goto end;
+
+	task = list_first_entry(&workq->task.empty_head,
+		struct crm_workq_task, entry);
+	if (task) {
+		atomic_sub(1, &workq->task.free_cnt);
+		list_del_init(&task->entry);
+	}
+
+end:
+	WORKQ_RELEASE_LOCK(workq, flags);
+
+	return task;
+}
+
+static void cam_req_mgr_workq_put_task(struct crm_workq_task *task)
+{
+	struct cam_req_mgr_core_workq *workq =
+		(struct cam_req_mgr_core_workq *)task->parent;
+	unsigned long flags = 0;
+
+	list_del_init(&task->entry);
+	task->cancel = 0;
+	task->process_cb = NULL;
+	task->priv = NULL;
+	WORKQ_ACQUIRE_LOCK(workq, flags);
+	list_add_tail(&task->entry,
+		&workq->task.empty_head);
+	atomic_add(1, &workq->task.free_cnt);
+	WORKQ_RELEASE_LOCK(workq, flags);
+}
+
+/**
+ * cam_req_mgr_process_task() - Process the enqueued task
+ * @task: pointer to task workq thread shall process
+ */
+static int cam_req_mgr_process_task(struct crm_workq_task *task)
+{
+	struct cam_req_mgr_core_workq *workq = NULL;
+
+	if (!task)
+		return -EINVAL;
+
+	workq = (struct cam_req_mgr_core_workq *)task->parent;
+	if (task->process_cb)
+		task->process_cb(task->priv, task->payload);
+	else
+		CAM_WARN(CAM_CRM, "FATAL:no task handler registered for workq");
+	cam_req_mgr_workq_put_task(task);
+
+	return 0;
+}
+
+/**
+ * cam_req_mgr_process_workq() - main loop handling
+ * @w: workqueue task pointer
+ */
+static void cam_req_mgr_process_workq(struct work_struct *w)
+{
+	struct cam_req_mgr_core_workq *workq = NULL;
+	struct crm_workq_task         *task;
+	int32_t                        i = CRM_TASK_PRIORITY_0;
+	unsigned long                  flags = 0;
+
+	if (!w) {
+		CAM_ERR(CAM_CRM, "NULL task pointer can not schedule");
+		return;
+	}
+	workq = (struct cam_req_mgr_core_workq *)
+		container_of(w, struct cam_req_mgr_core_workq, work);
+
+	while (i < CRM_TASK_PRIORITY_MAX) {
+		WORKQ_ACQUIRE_LOCK(workq, flags);
+		while (!list_empty(&workq->task.process_head[i])) {
+			task = list_first_entry(&workq->task.process_head[i],
+				struct crm_workq_task, entry);
+			atomic_sub(1, &workq->task.pending_cnt);
+			list_del_init(&task->entry);
+			WORKQ_RELEASE_LOCK(workq, flags);
+			cam_req_mgr_process_task(task);
+			CAM_DBG(CAM_CRM, "processed task %pK free_cnt %d",
+				task, atomic_read(&workq->task.free_cnt));
+			WORKQ_ACQUIRE_LOCK(workq, flags);
+		}
+		WORKQ_RELEASE_LOCK(workq, flags);
+		i++;
+	}
+}
+
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio)
+{
+	int rc = 0;
+	struct cam_req_mgr_core_workq *workq = NULL;
+	unsigned long flags = 0;
+
+	if (!task) {
+		CAM_WARN(CAM_CRM, "NULL task pointer can not schedule");
+		rc = -EINVAL;
+		goto end;
+	}
+	workq = (struct cam_req_mgr_core_workq *)task->parent;
+	if (!workq) {
+		CAM_DBG(CAM_CRM, "NULL workq pointer suspect mem corruption");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (task->cancel == 1) {
+		cam_req_mgr_workq_put_task(task);
+		CAM_WARN(CAM_CRM, "task aborted and queued back to pool");
+		rc = 0;
+		goto end;
+	}
+	task->priv = priv;
+	task->priority =
+		(prio < CRM_TASK_PRIORITY_MAX && prio >= CRM_TASK_PRIORITY_0)
+		? prio : CRM_TASK_PRIORITY_0;
+
+	WORKQ_ACQUIRE_LOCK(workq, flags);
+	if (!workq->job) {
+		rc = -EINVAL;
+		WORKQ_RELEASE_LOCK(workq, flags);
+		goto end;
+	}
+
+	list_add_tail(&task->entry,
+		&workq->task.process_head[task->priority]);
+
+	atomic_add(1, &workq->task.pending_cnt);
+	CAM_DBG(CAM_CRM, "enq task %pK pending_cnt %d",
+		task, atomic_read(&workq->task.pending_cnt));
+
+	queue_work(workq->job, &workq->work);
+	WORKQ_RELEASE_LOCK(workq, flags);
+end:
+	return rc;
+}
+
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
+	int flags)
+{
+	int32_t i, wq_flags = 0, max_active_tasks = 0;
+	struct crm_workq_task  *task;
+	struct cam_req_mgr_core_workq *crm_workq = NULL;
+	char buf[128] = "crm_workq-";
+
+	if (!*workq) {
+		crm_workq = kzalloc(sizeof(struct cam_req_mgr_core_workq),
+			GFP_KERNEL);
+		if (crm_workq == NULL)
+			return -ENOMEM;
+
+		wq_flags |= WQ_UNBOUND;
+		if (flags & CAM_WORKQ_FLAG_HIGH_PRIORITY)
+			wq_flags |= WQ_HIGHPRI;
+
+		if (flags & CAM_WORKQ_FLAG_SERIAL)
+			max_active_tasks = 1;
+
+		strlcat(buf, name, sizeof(buf));
+		CAM_DBG(CAM_CRM, "create workque crm_workq-%s", name);
+		crm_workq->job = alloc_workqueue(buf,
+			wq_flags, max_active_tasks, NULL);
+		if (!crm_workq->job) {
+			kfree(crm_workq);
+			return -ENOMEM;
+		}
+
+		/* Workq attributes initialization */
+		INIT_WORK(&crm_workq->work, cam_req_mgr_process_workq);
+		spin_lock_init(&crm_workq->lock_bh);
+		CAM_DBG(CAM_CRM, "LOCK_DBG workq %s lock %pK",
+			name, &crm_workq->lock_bh);
+
+		/* Task attributes initialization */
+		atomic_set(&crm_workq->task.pending_cnt, 0);
+		atomic_set(&crm_workq->task.free_cnt, 0);
+		for (i = CRM_TASK_PRIORITY_0; i < CRM_TASK_PRIORITY_MAX; i++)
+			INIT_LIST_HEAD(&crm_workq->task.process_head[i]);
+		INIT_LIST_HEAD(&crm_workq->task.empty_head);
+		crm_workq->in_irq = in_irq;
+		crm_workq->task.num_task = num_tasks;
+		crm_workq->task.pool = kcalloc(crm_workq->task.num_task,
+				sizeof(struct crm_workq_task), GFP_KERNEL);
+		if (!crm_workq->task.pool) {
+			CAM_WARN(CAM_CRM, "Insufficient memory %zu",
+				sizeof(struct crm_workq_task) *
+				crm_workq->task.num_task);
+			kfree(crm_workq);
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < crm_workq->task.num_task; i++) {
+			task = &crm_workq->task.pool[i];
+			task->parent = (void *)crm_workq;
+			/* Put all tasks in free pool */
+			INIT_LIST_HEAD(&task->entry);
+			cam_req_mgr_workq_put_task(task);
+		}
+		*workq = crm_workq;
+		CAM_DBG(CAM_CRM, "free tasks %d",
+			atomic_read(&crm_workq->task.free_cnt));
+	}
+
+	return 0;
+}
+
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **crm_workq)
+{
+	unsigned long flags = 0;
+	struct workqueue_struct   *job;
+
+	CAM_DBG(CAM_CRM, "destroy workque %pK", crm_workq);
+	if (*crm_workq) {
+		WORKQ_ACQUIRE_LOCK(*crm_workq, flags);
+		if ((*crm_workq)->job) {
+			job = (*crm_workq)->job;
+			(*crm_workq)->job = NULL;
+			WORKQ_RELEASE_LOCK(*crm_workq, flags);
+			destroy_workqueue(job);
+		} else {
+			WORKQ_RELEASE_LOCK(*crm_workq, flags);
+		}
+
+		/* Destroy workq payload data */
+		kfree((*crm_workq)->task.pool[0].payload);
+		(*crm_workq)->task.pool[0].payload = NULL;
+		kfree((*crm_workq)->task.pool);
+		kfree(*crm_workq);
+		*crm_workq = NULL;
+	}
+}
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
new file mode 100644
index 0000000..5ac6689
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_workq.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_REQ_MGR_WORKQ_H_
+#define _CAM_REQ_MGR_WORKQ_H_
+
+#include<linux/kernel.h>
+#include<linux/module.h>
+#include<linux/init.h>
+#include<linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+
+#include "cam_req_mgr_core.h"
+
+/* Flag to create a high priority workq */
+#define CAM_WORKQ_FLAG_HIGH_PRIORITY             (1 << 0)
+
+/*
+ * This flag ensures only one task from a given
+ * workq will execute at any given point on any
+ * given CPU.
+ */
+#define CAM_WORKQ_FLAG_SERIAL                    (1 << 1)
+
+/* Task priorities, lower the number higher the priority*/
+enum crm_task_priority {
+	CRM_TASK_PRIORITY_0,
+	CRM_TASK_PRIORITY_1,
+	CRM_TASK_PRIORITY_MAX,
+};
+
+/* workqueue will be used from irq context or not */
+enum crm_workq_context {
+	CRM_WORKQ_USAGE_NON_IRQ,
+	CRM_WORKQ_USAGE_IRQ,
+	CRM_WORKQ_USAGE_INVALID,
+};
+
+/** struct crm_workq_task
+ * @priority   : caller can assign a priority to the task based on its type
+ * @payload    : payload type depends on the user of the task
+ * @process_cb : registered callback invoked by the workq when an enqueued
+ *               task is ready for processing in workq thread context
+ * @parent     : the workq's parent is the link enqueuing tasks to this workq
+ * @entry      : list entry; its list head is the worker's empty_head
+ * @cancel     : set if the caller got a free task from the pool but wants to
+ *               abort it or put it back without using it
+ * @priv       : when a task is enqueued the caller can attach private data
+ *               which is passed back in the process callback
+ * @ret        : return value, reserved for future blocking calls
+ */
+struct crm_workq_task {
+	int32_t                  priority;
+	void                    *payload;
+	int32_t                (*process_cb)(void *priv, void *data);
+	void                    *parent;
+	struct list_head         entry;
+	uint8_t                  cancel;
+	void                    *priv;
+	int32_t                  ret;
+};
+
+/** struct cam_req_mgr_core_workq
+ * @work       : work token used by workqueue
+ * @job        : workqueue internal job struct
+ * @lock_bh    : lock for task structs
+ * @in_irq     : set true if workq can be used in irq context
+ * task -
+ * @lock        : mutex protecting the task lists
+ * @pending_cnt : num of tasks pending execution
+ * @free_cnt    : num of free/available tasks
+ * @process_head: per-priority list heads of tasks ready for processing
+ * @empty_head  : list head of available tasks which can be used
+ *                or acquired in order to enqueue a task to the workq
+ * @pool        : pool of tasks used for handling events in workq context
+ * @num_task    : size of the task pool
+ */
+struct cam_req_mgr_core_workq {
+	struct work_struct         work;
+	struct workqueue_struct   *job;
+	spinlock_t                 lock_bh;
+	uint32_t                   in_irq;
+
+	/* tasks */
+	struct {
+		struct mutex           lock;
+		atomic_t               pending_cnt;
+		atomic_t               free_cnt;
+
+		struct list_head       process_head[CRM_TASK_PRIORITY_MAX];
+		struct list_head       empty_head;
+		struct crm_workq_task *pool;
+		uint32_t               num_task;
+	} task;
+};
+
+/**
+ * cam_req_mgr_workq_create()
+ * @brief    : create a workqueue
+ * @name     : name of the workq to be allocated; it is a combination
+ *             of the session handle and link handle
+ * @num_tasks: number of tasks to be allocated for the workq
+ * @workq    : double pointer to the worker, set on success
+ * @in_irq   : set to CRM_WORKQ_USAGE_IRQ if the workq might be used in
+ *             irq context
+ * @flags    : bitwise OR of flags for workq behavior,
+ *             e.g. CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL
+ * This function will allocate and create the workqueue and pass
+ * the workq pointer back to the caller.
+ */
+int cam_req_mgr_workq_create(char *name, int32_t num_tasks,
+	struct cam_req_mgr_core_workq **workq, enum crm_workq_context in_irq,
+	int flags);
+
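+/*
+ * Example (illustrative sketch, not part of the driver): creating a serial,
+ * high-priority workq with a pool of 30 tasks. The workq name and task
+ * count below are hypothetical.
+ *
+ *	struct cam_req_mgr_core_workq *workq = NULL;
+ *	int rc;
+ *
+ *	rc = cam_req_mgr_workq_create("crm_link_wq", 30, &workq,
+ *		CRM_WORKQ_USAGE_NON_IRQ,
+ *		CAM_WORKQ_FLAG_HIGH_PRIORITY | CAM_WORKQ_FLAG_SERIAL);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	cam_req_mgr_workq_destroy(&workq);
+ */
+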
+/**
+ * cam_req_mgr_workq_destroy()
+ * @brief: destroy workqueue
+ * @workq: double pointer to the worker data struct
+ * This function will destroy the workqueue and clean up resources
+ * associated with the worker, such as tasks.
+ */
+void cam_req_mgr_workq_destroy(struct cam_req_mgr_core_workq **workq);
+
+/**
+ * cam_req_mgr_workq_enqueue_task()
+ * @brief: enqueue task in worker queue
+ * @task : task to be processed by the worker
+ * @priv : client's private data
+ * @prio : task priority
+ * The task's process callback is invoked later from workq thread context.
+ */
+int cam_req_mgr_workq_enqueue_task(struct crm_workq_task *task,
+	void *priv, int32_t prio);
+
+/**
+ * cam_req_mgr_workq_get_task()
+ * @brief: returns an empty task pointer for use
+ * @workq: workq used for processing
+ */
+struct crm_workq_task *cam_req_mgr_workq_get_task(
+	struct cam_req_mgr_core_workq *workq);
+
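+/*
+ * Example (illustrative sketch): fetching a free task from the pool and
+ * enqueuing it. cam_req_mgr_workq_get_task() is assumed to return NULL when
+ * the free pool is exhausted; the callback and payload names below are
+ * hypothetical.
+ *
+ *	struct crm_workq_task *task;
+ *
+ *	task = cam_req_mgr_workq_get_task(workq);
+ *	if (!task)
+ *		return -EBUSY;
+ *	task->process_cb = my_process_cb;
+ *	task->payload = my_payload;
+ *	rc = cam_req_mgr_workq_enqueue_task(task, priv, CRM_TASK_PRIORITY_0);
+ */
+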
+#endif
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
new file mode 100644
index 0000000..c86da37
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_subdev.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SUBDEV_H_
+#define _CAM_SUBDEV_H_
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#define CAM_SUBDEVICE_EVENT_MAX 30
+
+/**
+ * struct cam_subdev - describes a camera sub-device
+ *
+ * @pdev:                  Pointer to the platform device
+ * @sd:                    V4l2 subdevice
+ * @ops:                   V4l2 subdevice operations
+ * @internal_ops:          V4l2 subdevice internal operations
+ * @name:                  Name of the sub-device. Note that the name
+ *                             must be unique.
+ * @sd_flags:              Subdev flags. Can be:
+ *                             %V4L2_SUBDEV_FL_HAS_DEVNODE - Set this flag if
+ *                                 this subdev needs a device node.
+ *                             %V4L2_SUBDEV_FL_HAS_EVENTS -  Set this flag if
+ *                                 this subdev generates events.
+ * @token:                 Pointer to cookie of the client driver
+ * @ent_function:          Media entity function type. Can be:
+ *                             %CAM_IFE_DEVICE_TYPE - identifies as IFE device.
+ *                             %CAM_ICP_DEVICE_TYPE - identifies as ICP device.
+ *
+ * Each instance of a subdev driver should create this struct, either
+ * stand-alone or embedded in a larger struct. This structure should be
+ * initialized/registered by cam_register_subdev
+ *
+ */
+struct cam_subdev {
+	struct platform_device                *pdev;
+	struct v4l2_subdev                     sd;
+	const struct v4l2_subdev_ops          *ops;
+	const struct v4l2_subdev_internal_ops *internal_ops;
+	char                                  *name;
+	u32                                    sd_flags;
+	void                                  *token;
+	u32                                    ent_function;
+};
+
+/**
+ * cam_subdev_probe()
+ *
+ * @brief:      Camera subdevice node probe function for v4l2 setup
+ *
+ * @sd:                    Camera subdevice object
+ * @pdev:                  Pointer to the platform device
+ * @name:                  Name of the subdevice node
+ * @dev_type:              Subdevice node type
+ *
+ */
+int cam_subdev_probe(struct cam_subdev *sd, struct platform_device *pdev,
+	char *name, uint32_t dev_type);
+
+/**
+ * cam_subdev_remove()
+ *
+ * @brief:      Called when subdevice node is unloaded
+ *
+ * @sd:                    Camera subdevice node object
+ *
+ */
+int cam_subdev_remove(struct cam_subdev *sd);
+
+/**
+ * cam_register_subdev_fops()
+ *
+ * @brief:   This common utility function assigns subdev ops
+ *
+ * @fops:    v4l file operations
+ */
+void cam_register_subdev_fops(struct v4l2_file_operations *fops);
+
+/**
+ * cam_register_subdev()
+ *
+ * @brief:   This is the common utility function to be called by each camera
+ *           subdevice node when it tries to register itself to the camera
+ *           request manager
+ *
+ * @sd:                    Pointer to struct cam_subdev.
+ */
+int cam_register_subdev(struct cam_subdev *sd);
+
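+/*
+ * Example (illustrative sketch): a subdev driver typically embeds a struct
+ * cam_subdev in its control structure, fills it in and registers it. The
+ * csd pointer, ops tables and name below are hypothetical.
+ *
+ *	csd->pdev = pdev;
+ *	csd->ops = &my_subdev_ops;
+ *	csd->internal_ops = &my_internal_ops;
+ *	csd->name = "my-cam-subdev";
+ *	csd->sd_flags =
+ *		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+ *	csd->token = my_priv;
+ *	rc = cam_register_subdev(csd);
+ */
+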
+/**
+ * cam_unregister_subdev()
+ *
+ * @brief:    This is the common utility function to be called by each camera
+ *            subdevice node when it tries to unregister itself from the
+ *            camera request manager
+ *
+ * @sd:                    Pointer to struct cam_subdev.
+ */
+int cam_unregister_subdev(struct cam_subdev *sd);
+
+#endif /* _CAM_SUBDEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
new file mode 100644
index 0000000..db16724
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_utils/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
new file mode 100644
index 0000000..d342f4a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_actuator_dev.o cam_actuator_core.o cam_actuator_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
new file mode 100644
index 0000000..92718a5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.c
@@ -0,0 +1,949 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_actuator_core.h"
+#include "cam_sensor_util.h"
+#include "cam_trace.h"
+#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
+
+int32_t cam_actuator_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_setting[0].seq_val = CAM_VAF;
+	power_info->power_setting[0].config_val = 1;
+	power_info->power_setting[0].delay = 2;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+static int32_t cam_actuator_power_up(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int rc = 0;
+	struct cam_hw_soc_info  *soc_info =
+		&a_ctrl->soc_info;
+	struct cam_actuator_soc_private  *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	if ((power_info->power_setting == NULL) &&
+		(power_info->power_down_setting == NULL)) {
+		CAM_INFO(CAM_ACTUATOR,
+			"Using default power settings");
+		rc = cam_actuator_construct_default_power_setting(power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Construct default actuator power setting failed.");
+			return rc;
+		}
+	}
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&a_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed to fill vreg params for power up rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&a_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed to fill vreg params power down rc:%d", rc);
+		return rc;
+	}
+
+	power_info->dev = soc_info->dev;
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR,
+			"failed in actuator power up rc %d", rc);
+		return rc;
+	}
+
+	rc = camera_io_init(&a_ctrl->io_master_info);
+	if (rc < 0)
+		CAM_ERR(CAM_ACTUATOR, "cci init failed: rc: %d", rc);
+
+	return rc;
+}
+
+static int32_t cam_actuator_power_down(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int32_t rc = 0;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info = &a_ctrl->soc_info;
+	struct cam_actuator_soc_private  *soc_private;
+
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "failed: a_ctrl %pK", a_ctrl);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+	soc_info = &a_ctrl->soc_info;
+
+	if (!power_info) {
+		CAM_ERR(CAM_ACTUATOR, "failed: power_info %pK", power_info);
+		return -EINVAL;
+	}
+	rc = cam_sensor_util_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_ACTUATOR, "power down the core is failed:%d", rc);
+		return rc;
+	}
+
+	camera_io_release(&a_ctrl->io_master_info);
+
+	return rc;
+}
+
+static int32_t cam_actuator_i2c_modes_util(
+	struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list)
+{
+	int32_t rc = 0;
+	uint32_t i, size;
+
+	if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_RANDOM) {
+		rc = camera_io_dev_write(io_master_info,
+			&(i2c_list->i2c_settings));
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to random write I2C settings: %d",
+				rc);
+			return rc;
+		}
+	} else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_SEQ) {
+		rc = camera_io_dev_write_continuous(
+			io_master_info,
+			&(i2c_list->i2c_settings),
+			0);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to seq write I2C settings: %d",
+				rc);
+			return rc;
+		}
+	} else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_BURST) {
+		rc = camera_io_dev_write_continuous(
+			io_master_info,
+			&(i2c_list->i2c_settings),
+			1);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to burst write I2C settings: %d",
+				rc);
+			return rc;
+		}
+	} else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
+		size = i2c_list->i2c_settings.size;
+		for (i = 0; i < size; i++) {
+			rc = camera_io_dev_poll(
+			io_master_info,
+			i2c_list->i2c_settings.reg_setting[i].reg_addr,
+			i2c_list->i2c_settings.reg_setting[i].reg_data,
+			i2c_list->i2c_settings.reg_setting[i].data_mask,
+			i2c_list->i2c_settings.addr_type,
+			i2c_list->i2c_settings.data_type,
+			i2c_list->i2c_settings.reg_setting[i].delay);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					"i2c poll apply setting Fail: %d", rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
+
+int32_t cam_actuator_slaveInfo_pkt_parser(struct cam_actuator_ctrl_t *a_ctrl,
+	uint32_t *cmd_buf)
+{
+	int32_t rc = 0;
+	struct cam_cmd_i2c_info *i2c_info;
+
+	if (!a_ctrl || !cmd_buf) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+	if (a_ctrl->io_master_info.master_type == CCI_MASTER) {
+		a_ctrl->io_master_info.cci_client->cci_i2c_master =
+			a_ctrl->cci_i2c_master;
+		a_ctrl->io_master_info.cci_client->i2c_freq_mode =
+			i2c_info->i2c_freq_mode;
+		a_ctrl->io_master_info.cci_client->sid =
+			i2c_info->slave_addr >> 1;
+		CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x Freq Mode: %d",
+			i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+	} else if (a_ctrl->io_master_info.master_type == I2C_MASTER) {
+		a_ctrl->io_master_info.client->addr = i2c_info->slave_addr;
+		CAM_DBG(CAM_ACTUATOR, "Slave addr: 0x%x", i2c_info->slave_addr);
+	} else {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Master type: %d",
+			a_ctrl->io_master_info.master_type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int32_t cam_actuator_apply_settings(struct cam_actuator_ctrl_t *a_ctrl,
+	struct i2c_settings_array *i2c_set)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0;
+
+	if (a_ctrl == NULL || i2c_set == NULL) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (i2c_set->is_settings_valid != 1) {
+		CAM_ERR(CAM_ACTUATOR, " Invalid settings");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(i2c_list,
+		&(i2c_set->list_head), list) {
+		rc = cam_actuator_i2c_modes_util(
+			&(a_ctrl->io_master_info),
+			i2c_list);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to apply settings: %d",
+				rc);
+		} else {
+			CAM_DBG(CAM_ACTUATOR,
+				"Success:request ID: %d",
+				i2c_set->request_id);
+		}
+	}
+
+	return rc;
+}
+
+int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int32_t rc = 0, request_id, del_req_id;
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+
+	if (!apply) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Input Args");
+		return -EINVAL;
+	}
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(apply->dev_hdl);
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
+		return -EINVAL;
+	}
+	request_id = apply->request_id % MAX_PER_FRAME_ARRAY;
+
+	trace_cam_apply_req("Actuator", apply->request_id);
+
+	CAM_DBG(CAM_ACTUATOR, "Request Id: %lld", apply->request_id);
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	if ((apply->request_id ==
+		a_ctrl->i2c_data.per_frame[request_id].request_id) &&
+		(a_ctrl->i2c_data.per_frame[request_id].is_settings_valid)
+		== 1) {
+		rc = cam_actuator_apply_settings(a_ctrl,
+			&a_ctrl->i2c_data.per_frame[request_id]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed in applying the request: %lld\n",
+				apply->request_id);
+			goto release_mutex;
+		}
+	}
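+	/*
+	 * Per-frame settings live in a ring buffer of MAX_PER_FRAME_ARRAY
+	 * slots indexed by request id. A slot more than the system pipeline
+	 * delay behind the current request can no longer be applied, so
+	 * free that slot's settings here.
+	 */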
+	del_req_id = (request_id +
+		MAX_PER_FRAME_ARRAY - MAX_SYSTEM_PIPELINE_DELAY) %
+		MAX_PER_FRAME_ARRAY;
+
+	if (apply->request_id >
+		a_ctrl->i2c_data.per_frame[del_req_id].request_id) {
+		a_ctrl->i2c_data.per_frame[del_req_id].request_id = 0;
+		rc = delete_request(&a_ctrl->i2c_data.per_frame[del_req_id]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Fail deleting the req: %d err: %d\n",
+				del_req_id, rc);
+			goto release_mutex;
+		}
+	} else {
+		CAM_DBG(CAM_ACTUATOR, "No Valid Req to clean Up");
+	}
+
+release_mutex:
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+	return rc;
+}
+
+int32_t cam_actuator_establish_link(
+	struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+
+	if (!link) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(link->dev_hdl);
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	if (link->link_enable) {
+		a_ctrl->bridge_intf.link_hdl = link->link_hdl;
+		a_ctrl->bridge_intf.crm_cb = link->crm_cb;
+	} else {
+		a_ctrl->bridge_intf.link_hdl = -1;
+		a_ctrl->bridge_intf.crm_cb = NULL;
+	}
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+
+	return 0;
+}
+
+static void cam_actuator_update_req_mgr(
+	struct cam_actuator_ctrl_t *a_ctrl,
+	struct cam_packet *csl_packet)
+{
+	struct cam_req_mgr_add_request add_req;
+
+	add_req.link_hdl = a_ctrl->bridge_intf.link_hdl;
+	add_req.req_id = csl_packet->header.request_id;
+	add_req.dev_hdl = a_ctrl->bridge_intf.device_hdl;
+	add_req.skip_before_applying = 0;
+
+	if (a_ctrl->bridge_intf.crm_cb &&
+		a_ctrl->bridge_intf.crm_cb->add_req) {
+		a_ctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CAM_DBG(CAM_ACTUATOR, "Request Id: %lld added to CRM",
+			add_req.req_id);
+	} else {
+		CAM_ERR(CAM_ACTUATOR, "Can't add Request ID: %lld to CRM",
+			csl_packet->header.request_id);
+	}
+}
+
+int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	if (!info) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	info->dev_id = CAM_REQ_MGR_DEVICE_ACTUATOR;
+	strlcpy(info->name, CAM_ACTUATOR_NAME, sizeof(info->name));
+	info->p_delay = 1;
+	info->trigger = CAM_TRIGGER_POINT_SOF;
+
+	return 0;
+}
+
+int32_t cam_actuator_i2c_pkt_parse(struct cam_actuator_ctrl_t *a_ctrl,
+	void *arg)
+{
+	int32_t  rc = 0;
+	int32_t  i = 0;
+	uint32_t total_cmd_buf_in_bytes = 0;
+	size_t   len_of_buff = 0;
+	uint32_t *offset = NULL;
+	uint32_t *cmd_buf = NULL;
+	uintptr_t generic_ptr;
+	struct common_header      *cmm_hdr = NULL;
+	struct cam_control        *ioctl_ctrl = NULL;
+	struct cam_packet         *csl_packet = NULL;
+	struct cam_config_dev_cmd config;
+	struct i2c_data_settings  *i2c_data = NULL;
+	struct i2c_settings_array *i2c_reg_settings = NULL;
+	struct cam_cmd_buf_desc   *cmd_desc = NULL;
+	struct cam_actuator_soc_private *soc_private = NULL;
+	struct cam_sensor_power_ctrl_t  *power_info = NULL;
+
+	if (!a_ctrl || !arg) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+
+	power_info = &soc_private->power_info;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+	if (copy_from_user(&config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		&generic_ptr, &len_of_buff);
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR, "Error in converting command Handle %d",
+			rc);
+		return rc;
+	}
+
+	if (config.offset > len_of_buff) {
+		CAM_ERR(CAM_ACTUATOR,
+			"offset is out of bounds: offset: %lld len: %zu",
+			config.offset, len_of_buff);
+		return -EINVAL;
+	}
+
+	csl_packet =
+		(struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
+	CAM_DBG(CAM_ACTUATOR, "Pkt opcode: %d", csl_packet->header.op_code);
+
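+	/*
+	 * Drop any non-INIT packet whose request id is at or below the last
+	 * flushed request id; such requests were cancelled by the CRM flush.
+	 */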
+	if ((csl_packet->header.op_code & 0xFFFFFF) !=
+		CAM_ACTUATOR_PACKET_OPCODE_INIT &&
+		csl_packet->header.request_id <= a_ctrl->last_flush_req
+		&& a_ctrl->last_flush_req != 0) {
+		CAM_DBG(CAM_ACTUATOR,
+			"reject request %lld, last request to flush %lld",
+			csl_packet->header.request_id, a_ctrl->last_flush_req);
+		return -EINVAL;
+	}
+
+	if (csl_packet->header.request_id > a_ctrl->last_flush_req)
+		a_ctrl->last_flush_req = 0;
+
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_ACTUATOR_PACKET_OPCODE_INIT:
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+		/* Loop through multiple command buffers */
+		for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+			total_cmd_buf_in_bytes = cmd_desc[i].length;
+			if (!total_cmd_buf_in_bytes)
+				continue;
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+					&generic_ptr, &len_of_buff);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR, "Failed to get cpu buf");
+				return rc;
+			}
+			cmd_buf = (uint32_t *)generic_ptr;
+			if (!cmd_buf) {
+				CAM_ERR(CAM_ACTUATOR, "invalid cmd buf");
+				return -EINVAL;
+			}
+			cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+			cmm_hdr = (struct common_header *)cmd_buf;
+
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received slave info buffer");
+				rc = cam_actuator_slaveInfo_pkt_parser(
+					a_ctrl, cmd_buf);
+				if (rc < 0) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed to parse slave info: %d", rc);
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received power settings buffer");
+				rc = cam_sensor_update_power_settings(
+					cmd_buf,
+					total_cmd_buf_in_bytes,
+					power_info);
+				if (rc) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed:parse power settings: %d",
+					rc);
+					return rc;
+				}
+				break;
+			default:
+				CAM_DBG(CAM_ACTUATOR,
+					"Received initSettings buffer");
+				i2c_data = &(a_ctrl->i2c_data);
+				i2c_reg_settings =
+					&i2c_data->init_settings;
+
+				i2c_reg_settings->request_id = 0;
+				i2c_reg_settings->is_settings_valid = 1;
+				rc = cam_sensor_i2c_command_parser(
+					&a_ctrl->io_master_info,
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_ACTUATOR,
+					"Failed:parse init settings: %d",
+					rc);
+					return rc;
+				}
+				break;
+			}
+		}
+
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_ACQUIRE) {
+			rc = cam_actuator_power_up(a_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					" Actuator Power up failed");
+				return rc;
+			}
+			a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG;
+		}
+
+		rc = cam_actuator_apply_settings(a_ctrl,
+			&a_ctrl->i2c_data.init_settings);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR, "Cannot apply Init settings");
+			return rc;
+		}
+
+		/* Delete the request even if the apply is failed */
+		rc = delete_request(&a_ctrl->i2c_data.init_settings);
+		if (rc < 0) {
+			CAM_WARN(CAM_ACTUATOR,
+				"Fail in deleting the Init settings");
+			rc = 0;
+		}
+		break;
+	case CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to move lens: %d",
+				a_ctrl->cam_act_state);
+			return rc;
+		}
+		a_ctrl->setting_apply_state = ACT_APPLY_SETTINGS_NOW;
+
+		i2c_data = &(a_ctrl->i2c_data);
+		i2c_reg_settings = &i2c_data->init_settings;
+
+		i2c_data->init_settings.request_id =
+			csl_packet->header.request_id;
+		i2c_reg_settings->is_settings_valid = 1;
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += csl_packet->cmd_buf_offset / sizeof(uint32_t);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&a_ctrl->io_master_info,
+			i2c_reg_settings,
+			cmd_desc, 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Auto move lens parsing failed: %d", rc);
+			return rc;
+		}
+		cam_actuator_update_req_mgr(a_ctrl, csl_packet);
+		break;
+	case CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Not in right state to move lens: %d",
+				a_ctrl->cam_act_state);
+			return rc;
+		}
+
+		a_ctrl->setting_apply_state = ACT_APPLY_SETTINGS_LATER;
+		i2c_data = &(a_ctrl->i2c_data);
+		i2c_reg_settings = &i2c_data->per_frame[
+			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY];
+
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		i2c_reg_settings->is_settings_valid = 1;
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += csl_packet->cmd_buf_offset / sizeof(uint32_t);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&a_ctrl->io_master_info,
+			i2c_reg_settings,
+			cmd_desc, 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Manual move lens parsing failed: %d", rc);
+			return rc;
+		}
+
+		cam_actuator_update_req_mgr(a_ctrl, csl_packet);
+		break;
+	case CAM_PKT_NOP_OPCODE:
+		if (a_ctrl->cam_act_state < CAM_ACTUATOR_CONFIG) {
+			CAM_WARN(CAM_ACTUATOR,
+				"Received NOP packets in invalid state: %d",
+				a_ctrl->cam_act_state);
+			return -EINVAL;
+		}
+
+		cam_actuator_update_req_mgr(a_ctrl, csl_packet);
+		break;
+	}
+
+	return rc;
+}
+
+void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int rc = 0;
+	struct cam_actuator_soc_private  *soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&soc_private->power_info;
+
+	if (a_ctrl->cam_act_state == CAM_ACTUATOR_INIT)
+		return;
+
+	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_CONFIG) {
+		rc = cam_actuator_power_down(a_ctrl);
+		if (rc < 0)
+			CAM_ERR(CAM_ACTUATOR, "Actuator Power down failed");
+		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
+	}
+
+	if (a_ctrl->cam_act_state >= CAM_ACTUATOR_ACQUIRE) {
+		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_ACTUATOR, "destroying  dhdl failed");
+		a_ctrl->bridge_intf.device_hdl = -1;
+		a_ctrl->bridge_intf.link_hdl = -1;
+		a_ctrl->bridge_intf.session_hdl = -1;
+	}
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_setting_size = 0;
+	power_info->power_down_setting_size = 0;
+
+	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+}
+
+int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl,
+	void *arg)
+{
+	int rc = 0;
+	struct cam_control *cmd = (struct cam_control *)arg;
+	struct cam_actuator_soc_private *soc_private = NULL;
+	struct cam_sensor_power_ctrl_t  *power_info = NULL;
+
+	if (!a_ctrl || !cmd) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+
+	power_info = &soc_private->power_info;
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_ACTUATOR, "Opcode to Actuator: %d", cmd->op_code);
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev actuator_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		if (a_ctrl->bridge_intf.device_hdl != -1) {
+			CAM_ERR(CAM_ACTUATOR, "Device is already acquired");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = copy_from_user(&actuator_acq_dev,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(actuator_acq_dev));
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR, "Failed Copying from user\n");
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = actuator_acq_dev.session_handle;
+		bridge_params.ops = &a_ctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = a_ctrl;
+
+		actuator_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		a_ctrl->bridge_intf.device_hdl = actuator_acq_dev.device_handle;
+		a_ctrl->bridge_intf.session_hdl =
+			actuator_acq_dev.session_handle;
+
+		CAM_DBG(CAM_ACTUATOR, "Device Handle: %d",
+			actuator_acq_dev.device_handle);
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&actuator_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev))) {
+			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		a_ctrl->cam_act_state = CAM_ACTUATOR_ACQUIRE;
+	}
+		break;
+	case CAM_RELEASE_DEV: {
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+				"Cant release actuator: in start state");
+			goto release_mutex;
+		}
+
+		if (a_ctrl->cam_act_state == CAM_ACTUATOR_CONFIG) {
+			rc = cam_actuator_power_down(a_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					"Actuator Power down failed");
+				goto release_mutex;
+			}
+		}
+
+		if (a_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_ACTUATOR, "link hdl: %d device hdl: %d",
+				a_ctrl->bridge_intf.device_hdl,
+				a_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (a_ctrl->bridge_intf.link_hdl != -1) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Device [%d] still active on link 0x%x",
+				a_ctrl->cam_act_state,
+				a_ctrl->bridge_intf.link_hdl);
+			rc = -EAGAIN;
+			goto release_mutex;
+		}
+
+		rc = cam_destroy_device_hdl(a_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_ACTUATOR, "destroying the device hdl");
+		a_ctrl->bridge_intf.device_hdl = -1;
+		a_ctrl->bridge_intf.link_hdl = -1;
+		a_ctrl->bridge_intf.session_hdl = -1;
+		a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+		a_ctrl->last_flush_req = 0;
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
+		power_info->power_down_setting_size = 0;
+		power_info->power_setting_size = 0;
+	}
+		break;
+	case CAM_QUERY_CAP: {
+		struct cam_actuator_query_cap actuator_cap = {0};
+
+		actuator_cap.slot_info = a_ctrl->soc_info.index;
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&actuator_cap,
+			sizeof(struct cam_actuator_query_cap))) {
+			CAM_ERR(CAM_ACTUATOR, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_START_DEV: {
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+			"Not in right state to start : %d",
+			a_ctrl->cam_act_state);
+			goto release_mutex;
+		}
+		a_ctrl->cam_act_state = CAM_ACTUATOR_START;
+		a_ctrl->last_flush_req = 0;
+	}
+		break;
+	case CAM_STOP_DEV: {
+		struct i2c_settings_array *i2c_set = NULL;
+		int i;
+
+		if (a_ctrl->cam_act_state != CAM_ACTUATOR_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_ACTUATOR,
+			"Not in right state to stop : %d",
+			a_ctrl->cam_act_state);
+			goto release_mutex;
+		}
+
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			i2c_set = &(a_ctrl->i2c_data.per_frame[i]);
+
+			if (i2c_set->is_settings_valid == 1) {
+				rc = delete_request(i2c_set);
+				if (rc < 0)
+					CAM_ERR(CAM_ACTUATOR,
+						"delete request: %lld rc: %d",
+						i2c_set->request_id, rc);
+			}
+		}
+		a_ctrl->last_flush_req = 0;
+		a_ctrl->cam_act_state = CAM_ACTUATOR_CONFIG;
+	}
+		break;
+	case CAM_CONFIG_DEV: {
+		a_ctrl->setting_apply_state =
+			ACT_APPLY_SETTINGS_LATER;
+		rc = cam_actuator_i2c_pkt_parse(a_ctrl, arg);
+		if (rc < 0) {
+			CAM_ERR(CAM_ACTUATOR, "Failed in actuator Parsing");
+			goto release_mutex;
+		}
+
+		if (a_ctrl->setting_apply_state ==
+			ACT_APPLY_SETTINGS_NOW) {
+			rc = cam_actuator_apply_settings(a_ctrl,
+				&a_ctrl->i2c_data.init_settings);
+			if (rc < 0)
+				CAM_ERR(CAM_ACTUATOR,
+					"Cannot apply Update settings");
+
+			/* Delete the request even if the apply is failed */
+			rc = delete_request(&a_ctrl->i2c_data.init_settings);
+			if (rc < 0) {
+				CAM_ERR(CAM_ACTUATOR,
+					"Failed in Deleting the Init Pkt: %d",
+					rc);
+				goto release_mutex;
+			}
+		}
+	}
+		break;
+	default:
+		CAM_ERR(CAM_ACTUATOR, "Invalid Opcode %d", cmd->op_code);
+	}
+
+release_mutex:
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+
+	return rc;
+}
+
+int32_t cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush_req)
+{
+	int32_t rc = 0, i;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_actuator_ctrl_t *a_ctrl = NULL;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!flush_req)
+		return -EINVAL;
+
+	a_ctrl = (struct cam_actuator_ctrl_t *)
+		cam_get_device_priv(flush_req->dev_hdl);
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (a_ctrl->i2c_data.per_frame == NULL) {
+		CAM_ERR(CAM_ACTUATOR, "i2c frame data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		a_ctrl->last_flush_req = flush_req->req_id;
+		CAM_DBG(CAM_ACTUATOR, "last reqest to flush is %lld",
+			flush_req->req_id);
+	}
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+		i2c_set = &(a_ctrl->i2c_data.per_frame[i]);
+
+		if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+				&& (i2c_set->request_id != flush_req->req_id))
+			continue;
+
+		if (i2c_set->is_settings_valid == 1) {
+			rc = delete_request(i2c_set);
+			if (rc < 0)
+				CAM_ERR(CAM_ACTUATOR,
+					"delete request: %lld rc: %d",
+					i2c_set->request_id, rc);
+
+			if (flush_req->type ==
+				CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+				cancel_req_id_found = 1;
+				break;
+			}
+		}
+	}
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CAM_DBG(CAM_ACTUATOR,
+			"Flush request id:%lld not found in the pending list",
+			flush_req->req_id);
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
new file mode 100644
index 0000000..c6999e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_core.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ACTUATOR_CORE_H_
+#define _CAM_ACTUATOR_CORE_H_
+
+#include "cam_actuator_dev.h"
+
+/**
+ * @power_info: power setting info to control the power
+ *
+ * This API constructs the default actuator power setting.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_actuator_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info);
+
+/**
+ * @apply: Req mgr structure for applying request
+ *
+ * This API applies the specified request
+ */
+int32_t cam_actuator_apply_request(struct cam_req_mgr_apply_request *apply);
+
+/**
+ * @info: Sub device info to req mgr
+ *
+ * This API publishes the subdevice info to the req mgr
+ */
+int32_t cam_actuator_publish_dev_info(struct cam_req_mgr_device_info *info);
+
+/**
+ * @flush: Req mgr structure for flushing request
+ *
+ * This API flushes the specified request
+ */
+int32_t cam_actuator_flush_request(struct cam_req_mgr_flush_request *flush);
+
+
+/**
+ * @link: Link setup info
+ *
+ * This API establishes a link between the actuator subdevice and the req mgr
+ */
+int32_t cam_actuator_establish_link(
+	struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ * @arg:    Camera control command argument
+ *
+ * This API handles camera control commands issued to the actuator
+ */
+int32_t cam_actuator_driver_cmd(struct cam_actuator_ctrl_t *a_ctrl, void *arg);
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ *
+ * This API handles the shutdown ioctl/close
+ */
+void cam_actuator_shutdown(struct cam_actuator_ctrl_t *a_ctrl);
+
+#endif /* _CAM_ACTUATOR_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
new file mode 100644
index 0000000..fce5f6d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_actuator_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_actuator_soc.h"
+#include "cam_actuator_core.h"
+#include "cam_trace.h"
+
+static long cam_actuator_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct cam_actuator_ctrl_t *a_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_actuator_driver_cmd(a_ctrl, arg);
+		break;
+	default:
+		CAM_ERR(CAM_ACTUATOR, "Invalid ioctl cmd");
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_actuator_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_ACTUATOR,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		cmd = VIDIOC_CAM_CONTROL;
+		rc = cam_actuator_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed in actuator subdev handling rc: %d",
+				rc);
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_ACTUATOR, "Invalid compat ioctl: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+#endif
+
+static int cam_actuator_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_actuator_ctrl_t *a_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "a_ctrl ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(a_ctrl->actuator_mutex));
+	cam_actuator_shutdown(a_ctrl);
+	mutex_unlock(&(a_ctrl->actuator_mutex));
+
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops cam_actuator_subdev_core_ops = {
+	.ioctl = cam_actuator_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_actuator_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_actuator_subdev_ops = {
+	.core = &cam_actuator_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_actuator_internal_ops = {
+	.close = cam_actuator_subdev_close,
+};
+
+static int cam_actuator_init_subdev(struct cam_actuator_ctrl_t *a_ctrl)
+{
+	int rc = 0;
+
+	a_ctrl->v4l2_dev_str.internal_ops =
+		&cam_actuator_internal_ops;
+	a_ctrl->v4l2_dev_str.ops =
+		&cam_actuator_subdev_ops;
+	strlcpy(a_ctrl->device_name, CAMX_ACTUATOR_DEV_NAME,
+		sizeof(a_ctrl->device_name));
+	a_ctrl->v4l2_dev_str.name =
+		a_ctrl->device_name;
+	a_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	a_ctrl->v4l2_dev_str.ent_function =
+		CAM_ACTUATOR_DEVICE_TYPE;
+	a_ctrl->v4l2_dev_str.token = a_ctrl;
+
+	rc = cam_register_subdev(&(a_ctrl->v4l2_dev_str));
+	if (rc)
+		CAM_ERR(CAM_SENSOR, "Fail with cam_register_subdev rc: %d", rc);
+
+	return rc;
+}
+
+static int32_t cam_actuator_driver_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int32_t                         rc = 0;
+	int32_t                         i = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl;
+	struct cam_hw_soc_info          *soc_info = NULL;
+	struct cam_actuator_soc_private *soc_private = NULL;
+
+	if (client == NULL || id == NULL) {
+		CAM_ERR(CAM_ACTUATOR, "Invalid Args client: %pK id: %pK",
+			client, id);
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_ACTUATOR, "%s :: i2c_check_functionality failed",
+			 client->name);
+		rc = -EFAULT;
+		return rc;
+	}
+
+	/* Create sensor control structure */
+	a_ctrl = kzalloc(sizeof(*a_ctrl), GFP_KERNEL);
+	if (!a_ctrl)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, a_ctrl);
+
+	soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_ctrl;
+	}
+	a_ctrl->soc_info.soc_private = soc_private;
+
+	a_ctrl->io_master_info.client = client;
+	soc_info = &a_ctrl->soc_info;
+	soc_info->dev = &client->dev;
+	soc_info->dev_name = client->name;
+	a_ctrl->io_master_info.master_type = I2C_MASTER;
+
+	rc = cam_actuator_parse_dt(a_ctrl, &client->dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR, "failed: cam_sensor_parse_dt rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_actuator_init_subdev(a_ctrl);
+	if (rc)
+		goto free_soc;
+
+	if (soc_private->i2c_info.slave_addr != 0)
+		a_ctrl->io_master_info.client->addr =
+			soc_private->i2c_info.slave_addr;
+
+	a_ctrl->i2c_data.per_frame =
+		kcalloc(MAX_PER_FRAME_ARRAY,
+		sizeof(struct i2c_settings_array), GFP_KERNEL);
+	if (a_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto unreg_subdev;
+	}
+
+	INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
+
+	a_ctrl->bridge_intf.device_hdl = -1;
+	a_ctrl->bridge_intf.link_hdl = -1;
+	a_ctrl->bridge_intf.ops.get_dev_info =
+		cam_actuator_publish_dev_info;
+	a_ctrl->bridge_intf.ops.link_setup =
+		cam_actuator_establish_link;
+	a_ctrl->bridge_intf.ops.apply_req =
+		cam_actuator_apply_request;
+	a_ctrl->last_flush_req = 0;
+
+	v4l2_set_subdevdata(&(a_ctrl->v4l2_dev_str.sd), a_ctrl);
+
+	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+
+	return rc;
+
+unreg_subdev:
+	cam_unregister_subdev(&(a_ctrl->v4l2_dev_str));
+free_soc:
+	kfree(soc_private);
+free_ctrl:
+	kfree(a_ctrl);
+	return rc;
+}
+
+static int32_t cam_actuator_platform_remove(struct platform_device *pdev)
+{
+	int32_t rc = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl;
+	struct cam_actuator_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info;
+
+	a_ctrl = platform_get_drvdata(pdev);
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
+		return 0;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	kfree(a_ctrl->io_master_info.cci_client);
+	a_ctrl->io_master_info.cci_client = NULL;
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	kfree(a_ctrl->soc_info.soc_private);
+	kfree(a_ctrl->i2c_data.per_frame);
+	a_ctrl->i2c_data.per_frame = NULL;
+	devm_kfree(&pdev->dev, a_ctrl);
+
+	return rc;
+}
+
+static int32_t cam_actuator_driver_i2c_remove(struct i2c_client *client)
+{
+	int32_t rc = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl =
+		i2c_get_clientdata(client);
+	struct cam_actuator_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info;
+
+	/* Handle I2C Devices */
+	if (!a_ctrl) {
+		CAM_ERR(CAM_ACTUATOR, "Actuator device is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	/* Free allocated memory */
+	kfree(a_ctrl->i2c_data.per_frame);
+	a_ctrl->i2c_data.per_frame = NULL;
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	kfree(a_ctrl->soc_info.soc_private);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	a_ctrl->soc_info.soc_private = NULL;
+	kfree(a_ctrl);
+	return rc;
+}
+
+static const struct of_device_id cam_actuator_driver_dt_match[] = {
+	{.compatible = "qcom,actuator"},
+	{}
+};
+
+static int32_t cam_actuator_driver_platform_probe(
+	struct platform_device *pdev)
+{
+	int32_t                         rc = 0;
+	int32_t                         i = 0;
+	struct cam_actuator_ctrl_t      *a_ctrl = NULL;
+	struct cam_actuator_soc_private *soc_private = NULL;
+
+	/* Create actuator control structure */
+	a_ctrl = devm_kzalloc(&pdev->dev,
+		sizeof(struct cam_actuator_ctrl_t), GFP_KERNEL);
+	if (!a_ctrl)
+		return -ENOMEM;
+
+	/* Fill in platform device */
+	a_ctrl->v4l2_dev_str.pdev = pdev;
+	a_ctrl->soc_info.pdev = pdev;
+	a_ctrl->soc_info.dev = &pdev->dev;
+	a_ctrl->soc_info.dev_name = pdev->name;
+	a_ctrl->io_master_info.master_type = CCI_MASTER;
+
+	a_ctrl->io_master_info.cci_client = kzalloc(sizeof(
+		struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!(a_ctrl->io_master_info.cci_client)) {
+		rc = -ENOMEM;
+		goto free_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_actuator_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	a_ctrl->soc_info.soc_private = soc_private;
+	soc_private->power_info.dev = &pdev->dev;
+
+	a_ctrl->i2c_data.per_frame =
+		kcalloc(MAX_PER_FRAME_ARRAY,
+		sizeof(struct i2c_settings_array), GFP_KERNEL);
+	if (a_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto free_soc;
+	}
+
+	INIT_LIST_HEAD(&(a_ctrl->i2c_data.init_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(a_ctrl->i2c_data.per_frame[i].list_head));
+
+	rc = cam_actuator_parse_dt(a_ctrl, &(pdev->dev));
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR, "Paring actuator dt failed rc %d", rc);
+		goto free_mem;
+	}
+
+	/* Fill platform device id*/
+	pdev->id = a_ctrl->soc_info.index;
+
+	rc = cam_actuator_init_subdev(a_ctrl);
+	if (rc)
+		goto free_mem;
+
+	a_ctrl->bridge_intf.device_hdl = -1;
+	a_ctrl->bridge_intf.link_hdl = -1;
+	a_ctrl->bridge_intf.ops.get_dev_info =
+		cam_actuator_publish_dev_info;
+	a_ctrl->bridge_intf.ops.link_setup =
+		cam_actuator_establish_link;
+	a_ctrl->bridge_intf.ops.apply_req =
+		cam_actuator_apply_request;
+	a_ctrl->bridge_intf.ops.flush_req =
+		cam_actuator_flush_request;
+	a_ctrl->last_flush_req = 0;
+
+	platform_set_drvdata(pdev, a_ctrl);
+	v4l2_set_subdevdata(&a_ctrl->v4l2_dev_str.sd, a_ctrl);
+	a_ctrl->cam_act_state = CAM_ACTUATOR_INIT;
+
+	return rc;
+
+free_mem:
+	kfree(a_ctrl->i2c_data.per_frame);
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(a_ctrl->io_master_info.cci_client);
+free_ctrl:
+	devm_kfree(&pdev->dev, a_ctrl);
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_actuator_driver_dt_match);
+
+static struct platform_driver cam_actuator_platform_driver = {
+	.probe = cam_actuator_driver_platform_probe,
+	.driver = {
+		.name = "qcom,actuator",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_actuator_driver_dt_match,
+		.suppress_bind_attrs = true,
+	},
+	.remove = cam_actuator_platform_remove,
+};
+
+static const struct i2c_device_id i2c_id[] = {
+	{ACTUATOR_DRIVER_I2C, (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_actuator_driver_i2c = {
+	.id_table = i2c_id,
+	.probe  = cam_actuator_driver_i2c_probe,
+	.remove = cam_actuator_driver_i2c_remove,
+	.driver = {
+		.name = ACTUATOR_DRIVER_I2C,
+	},
+};
+
+static int __init cam_actuator_driver_init(void)
+{
+	int32_t rc = 0;
+
+	rc = platform_driver_register(&cam_actuator_platform_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR,
+			"platform_driver_register failed rc = %d", rc);
+		return rc;
+	}
+	rc = i2c_add_driver(&cam_actuator_driver_i2c);
+	if (rc)
+		CAM_ERR(CAM_ACTUATOR, "i2c_add_driver failed rc = %d", rc);
+
+	return rc;
+}
+
+static void __exit cam_actuator_driver_exit(void)
+{
+	platform_driver_unregister(&cam_actuator_platform_driver);
+	i2c_del_driver(&cam_actuator_driver_i2c);
+}
+
+module_init(cam_actuator_driver_init);
+module_exit(cam_actuator_driver_exit);
+MODULE_DESCRIPTION("cam_actuator_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
new file mode 100644
index 0000000..1809b29
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_dev.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _CAM_ACTUATOR_DEV_H_
+#define _CAM_ACTUATOR_DEV_H_
+
+#include <cam_sensor_io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_cci_dev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_subdev.h>
+#include "cam_sensor_util.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+#define ACTUATOR_DRIVER_I2C "i2c_actuator"
+#define CAMX_ACTUATOR_DEV_NAME "cam-actuator-driver"
+
+#define MSM_ACTUATOR_MAX_VREGS (10)
+#define ACTUATOR_MAX_POLL_COUNT 10
+
+
+enum cam_actuator_apply_state_t {
+	ACT_APPLY_SETTINGS_NOW,
+	ACT_APPLY_SETTINGS_LATER,
+};
+
+enum cam_actuator_state {
+	CAM_ACTUATOR_INIT,
+	CAM_ACTUATOR_ACQUIRE,
+	CAM_ACTUATOR_CONFIG,
+	CAM_ACTUATOR_START,
+};
+
+/**
+ * struct cam_actuator_i2c_info_t - I2C info
+ * @slave_addr      :   slave address
+ * @i2c_freq_mode   :   i2c frequency mode
+ */
+struct cam_actuator_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
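+/**
+ * struct cam_actuator_soc_private - actuator private SOC data
+ * @i2c_info: I2C slave info for the actuator
+ * @power_info: power settings to control the actuator power
+ */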
+struct cam_actuator_soc_private {
+	struct cam_actuator_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @link_hdl: Link Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_actuator_ctrl_t
+ * @i2c_driver: I2C device info
+ * @cci_i2c_master: CCI master on which the actuator sits
+ * @cci_num: CCI device number
+ * @io_master_info: Information about the communication master
+ * @soc_info: SOC/hardware information
+ * @actuator_mutex: Actuator mutex
+ * @id: Cell Index
+ * @setting_apply_state: Actuator settings apply state
+ * @cam_act_state: Actuator state
+ * @cam_pinctrl_status: Pinctrl status
+ * @v4l2_dev_str: V4L2 device structure
+ * @i2c_data: I2C register settings structure
+ * @act_info: Sensor query cap structure
+ * @bridge_intf: Bridge interface parameters
+ * @device_name: Device name
+ * @last_flush_req: Last request to flush
+ */
+struct cam_actuator_ctrl_t {
+	struct i2c_driver *i2c_driver;
+	enum cci_i2c_master_t cci_i2c_master;
+	enum cci_device_num cci_num;
+	struct camera_io_master io_master_info;
+	struct cam_hw_soc_info soc_info;
+	struct mutex actuator_mutex;
+	uint32_t id;
+	enum cam_actuator_apply_state_t setting_apply_state;
+	enum cam_actuator_state cam_act_state;
+	uint8_t cam_pinctrl_status;
+	struct cam_subdev v4l2_dev_str;
+	struct i2c_data_settings i2c_data;
+	struct cam_actuator_query_cap act_info;
+	struct intf_params bridge_intf;
+	char device_name[20];
+	uint32_t last_flush_req;
+};
+
+#endif /* _CAM_ACTUATOR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
new file mode 100644
index 0000000..d34941d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+#include "cam_actuator_soc.h"
+#include "cam_soc_util.h"
+
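+/*
+ * Example of a devicetree node this parser consumes (illustrative sketch;
+ * the node name and property values are hypothetical, only "cci-master"
+ * and "cci-device" are read directly below):
+ *
+ *	actuator0: qcom,actuator@0 {
+ *		compatible = "qcom,actuator";
+ *		cell-index = <0>;
+ *		cci-master = <0>;
+ *		cci-device = <0>;
+ *	};
+ */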
+int32_t cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
+	struct device *dev)
+{
+	int32_t                         rc = 0;
+	struct cam_hw_soc_info          *soc_info = &a_ctrl->soc_info;
+	struct cam_actuator_soc_private *soc_private =
+		(struct cam_actuator_soc_private *)a_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info = &soc_private->power_info;
+	struct device_node              *of_node = NULL;
+
+	/* Initialize mutex */
+	mutex_init(&(a_ctrl->actuator_mutex));
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_ACTUATOR, "parsing common soc dt(rc %d)", rc);
+		return rc;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	if (a_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = of_property_read_u32(of_node, "cci-master",
+			&(a_ctrl->cci_i2c_master));
+		CAM_DBG(CAM_ACTUATOR, "cci-master %d, rc %d",
+			a_ctrl->cci_i2c_master, rc);
+		if ((rc < 0) || (a_ctrl->cci_i2c_master >= MASTER_MAX)) {
+			CAM_ERR(CAM_ACTUATOR,
+				"Wrong info: rc: %d, dt CCI master:%d",
+				rc, a_ctrl->cci_i2c_master);
+			rc = -EFAULT;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node, "cci-device",
+			&a_ctrl->cci_num);
+		CAM_DBG(CAM_ACTUATOR, "cci-device %d, rc %d",
+			a_ctrl->cci_num, rc);
+		if (rc < 0) {
+			/* Default to CCI device 0 */
+			a_ctrl->cci_num = CCI_DEVICE_0;
+			rc = 0;
+		}
+		a_ctrl->io_master_info.cci_client->cci_device = a_ctrl->cci_num;
+	}
+
+	if (!soc_info->gpio_data) {
+		CAM_INFO(CAM_ACTUATOR, "No GPIO found");
+		rc = 0;
+		return rc;
+	}
+
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		CAM_INFO(CAM_ACTUATOR, "No GPIO found");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
+		CAM_ERR(CAM_ACTUATOR, "No/Error Actuator GPIOs");
+		return -EINVAL;
+	}
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.h
new file mode 100644
index 0000000..6cfb1a5
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_actuator/cam_actuator_soc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_ACTUATOR_SOC_H_
+#define _CAM_ACTUATOR_SOC_H_
+
+#include "cam_actuator_dev.h"
+
+/**
+ * @a_ctrl: Actuator ctrl structure
+ *
+ * This API parses actuator device tree
+ */
+int cam_actuator_parse_dt(struct cam_actuator_ctrl_t *a_ctrl,
+	struct device *dev);
+
+#endif /* _CAM_ACTUATOR_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
new file mode 100644
index 0000000..e7d1f2e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_cci_dev.o cam_cci_core.o cam_cci_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
new file mode 100644
index 0000000..e8a592a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.c
@@ -0,0 +1,1639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include "cam_cci_core.h"
+#include "cam_cci_dev.h"
+
+static int32_t cam_cci_convert_type_to_num_bytes(
+	enum camera_sensor_i2c_type type)
+{
+	int32_t num_bytes;
+
+	switch (type) {
+	case CAMERA_SENSOR_I2C_TYPE_BYTE:
+		num_bytes = 1;
+		break;
+	case CAMERA_SENSOR_I2C_TYPE_WORD:
+		num_bytes = 2;
+		break;
+	case CAMERA_SENSOR_I2C_TYPE_3B:
+		num_bytes = 3;
+		break;
+	case CAMERA_SENSOR_I2C_TYPE_DWORD:
+		num_bytes = 4;
+		break;
+	default:
+		CAM_ERR(CAM_CCI, "failed: %d", type);
+		num_bytes = 0;
+		break;
+	}
+	return num_bytes;
+}
+
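+/*
+ * Issue a HALT request for the given master and wait for the reset
+ * completion; if that times out, fall back to an explicit master RESET
+ * and wait once more. Used to recover the queues after an error or a
+ * timed-out transaction.
+ */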
+static void cam_cci_flush_queue(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master)
+{
+	int32_t rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	cam_io_w_mb(1 << master, base + CCI_HALT_REQ_ADDR);
+	rc = wait_for_completion_timeout(
+		&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
+	/* wait_for_completion_timeout() returns 0 on timeout and the
+	 * remaining jiffies otherwise; it never returns a negative value.
+	 */
+	if (rc == 0) {
+		CAM_ERR(CAM_CCI, "wait timeout");
+
+		/* Set reset pending flag to TRUE */
+		cci_dev->cci_master_info[master].reset_pending = TRUE;
+
+		/* Set proper mask to RESET CMD address based on MASTER */
+		if (master == MASTER_0)
+			cam_io_w_mb(CCI_M0_RESET_RMSK,
+				base + CCI_RESET_CMD_ADDR);
+		else
+			cam_io_w_mb(CCI_M1_RESET_RMSK,
+				base + CCI_RESET_CMD_ADDR);
+
+		/* wait for reset done irq */
+		rc = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].reset_complete,
+			CCI_TIMEOUT);
+		if (rc <= 0)
+			CAM_ERR(CAM_CCI, "wait failed %d", rc);
+	}
+}
+
+static int32_t cam_cci_validate_queue(struct cci_device *cci_dev,
+	uint32_t len,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t read_val = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	unsigned long flags;
+
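+	/*
+	 * Queue registers are strided per master/queue: each master spans
+	 * 0x200 bytes and each queue 0x100, so e.g. MASTER_1/QUEUE_1 sits
+	 * at base + 0x300 relative to the M0_Q0 register addresses.
+	 */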
+	read_val = cam_io_r_mb(base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d len %d max %d",
+		read_val, len,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	if ((read_val + len + 1) >
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size) {
+		uint32_t reg_val = 0;
+		uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+		CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD");
+		cam_io_w_mb(report_val,
+			base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+		read_val++;
+		CAM_DBG(CAM_CCI,
+			"CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d, queue: %d",
+			read_val, queue);
+		cam_io_w_mb(read_val, base +
+			CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		reg_val = 1 << ((master * 2) + queue);
+		CAM_DBG(CAM_CCI, "CCI_QUEUE_START_ADDR");
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		atomic_set(
+			&cci_dev->cci_master_info[master].done_pending[queue],
+			1);
+		cam_io_w_mb(reg_val, base +
+			CCI_QUEUE_START_ADDR);
+		CAM_DBG(CAM_CCI, "wait_for_completion_timeout");
+		atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].report_q[queue],
+			CCI_TIMEOUT);
+		if (rc <= 0) {
+			CAM_ERR(CAM_CCI, "Wait_for_completion_timeout: rc: %d",
+				rc);
+			if (rc == 0)
+				rc = -ETIMEDOUT;
+			cam_cci_flush_queue(cci_dev, master);
+			return rc;
+		}
+		rc = cci_dev->cci_master_info[master].status;
+		if (rc < 0)
+			CAM_ERR(CAM_CCI, "Failed rc %d", rc);
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_write_i2c_queue(struct cci_device *cci_dev,
+	uint32_t val,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	struct cam_hw_soc_info *soc_info;
+	void __iomem *base;
+
+	/* Validate cci_dev before it is dereferenced below */
+	if (!cci_dev) {
+		CAM_ERR(CAM_CCI, "Failed");
+		return -EINVAL;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
+	rc = cam_cci_validate_queue(cci_dev, 1, master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Failed %d", rc);
+		return rc;
+	}
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+		CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	return rc;
+}
+
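+/*
+ * Only the priority queue brackets its commands with LOCK/UNLOCK; for
+ * any other queue this is a no-op.
+ */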
+static int32_t cam_cci_lock_queue(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue, uint32_t en)
+{
+	uint32_t val;
+
+	if (queue != PRIORITY_QUEUE)
+		return 0;
+
+	val = en ? CCI_I2C_LOCK_CMD : CCI_I2C_UNLOCK_CMD;
+	return cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+}
+
+#ifdef DUMP_CCI_REGISTERS
+static void cam_cci_dump_registers(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master, enum cci_i2c_queue_t queue)
+{
+	uint32_t read_val = 0;
+	uint32_t i = 0;
+	uint32_t reg_offset = 0;
+	void __iomem *base = cci_dev->soc_info.reg_map[0].mem_base;
+
+	/* CCI Top Registers */
+	CAM_INFO(CAM_CCI, "****CCI TOP Registers ****");
+	for (i = 0; i < DEBUG_TOP_REG_COUNT; i++) {
+		reg_offset = DEBUG_TOP_REG_START + i * 4;
+		read_val = cam_io_r_mb(base + reg_offset);
+		CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+			reg_offset, read_val);
+	}
+
+	/* CCI Master registers */
+	CAM_INFO(CAM_CCI, "****CCI MASTER %d Registers ****",
+		master);
+	for (i = 0; i < DEBUG_MASTER_REG_COUNT; i++) {
+		reg_offset = DEBUG_MASTER_REG_START + master*0x100 + i * 4;
+		read_val = cam_io_r_mb(base + reg_offset);
+		CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+			reg_offset, read_val);
+	}
+
+	/* CCI Master Queue registers */
+	CAM_INFO(CAM_CCI, " **** CCI MASTER%d QUEUE%d Registers ****",
+		master, queue);
+	for (i = 0; i < DEBUG_MASTER_QUEUE_REG_COUNT; i++) {
+		reg_offset = DEBUG_MASTER_QUEUE_REG_START +  master*0x200 +
+			queue*0x100 + i * 4;
+		read_val = cam_io_r_mb(base + reg_offset);
+		CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+			reg_offset, read_val);
+	}
+
+	/* CCI Interrupt registers */
+	CAM_INFO(CAM_CCI, " ****CCI Interrupt Registers ****");
+	for (i = 0; i < DEBUG_INTR_REG_COUNT; i++) {
+		reg_offset = DEBUG_INTR_REG_START + i * 4;
+		read_val = cam_io_r_mb(base + reg_offset);
+		CAM_INFO(CAM_CCI, "offset = 0x%X value = 0x%X",
+			reg_offset, read_val);
+	}
+}
+#endif
+
+static int32_t cam_cci_wait(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+
+	if (!cci_dev) {
+		CAM_ERR(CAM_CCI, "failed");
+		return -EINVAL;
+	}
+
+	rc = wait_for_completion_timeout(
+		&cci_dev->cci_master_info[master].report_q[queue], CCI_TIMEOUT);
+	CAM_DBG(CAM_CCI, "wait DONE_for_completion_timeout");
+
+	if (rc <= 0) {
+#ifdef DUMP_CCI_REGISTERS
+		cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+		CAM_ERR(CAM_CCI, "wait for queue: %d", queue);
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+		cam_cci_flush_queue(cci_dev, master);
+		return rc;
+	}
+	rc = cci_dev->cci_master_info[master].status;
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "failed rc %d", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
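+/*
+ * Load a REPORT command into the queue and bump the exec word count.
+ * The (1 << 8) flag appears to request an irq when the command
+ * executes; the Q0/Q1 REPORT irq handler then completes report_q.
+ */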
+static void cam_cci_load_report_cmd(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	uint32_t read_val = cam_io_r_mb(base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	uint32_t report_val = CCI_I2C_REPORT_CMD | (1 << 8);
+
+	CAM_DBG(CAM_CCI, "CCI_I2C_REPORT_CMD curr_w_cnt: %d", read_val);
+	cam_io_w_mb(report_val,
+		base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+	read_val++;
+
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR %d", read_val);
+	cam_io_w_mb(read_val, base +
+		CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+}
+
+static int32_t cam_cci_wait_report_cmd(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	unsigned long flags;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	uint32_t reg_val = 1 << ((master * 2) + queue);
+
+	cam_cci_load_report_cmd(cci_dev, master, queue);
+	spin_lock_irqsave(
+		&cci_dev->cci_master_info[master].lock_q[queue], flags);
+	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+	atomic_set(&cci_dev->cci_master_info[master].done_pending[queue], 1);
+	spin_unlock_irqrestore(
+		&cci_dev->cci_master_info[master].lock_q[queue], flags);
+	cam_io_w_mb(reg_val, base +
+		CCI_QUEUE_START_ADDR);
+
+	return cam_cci_wait(cci_dev, master, queue);
+}
+
+static int32_t cam_cci_transfer_end(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(
+		&cci_dev->cci_master_info[master].lock_q[queue], flags);
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+			return rc;
+		}
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+	} else {
+		atomic_set(
+			&cci_dev->cci_master_info[master].done_pending[queue],
+			1);
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_wait(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+		rc = cam_cci_lock_queue(cci_dev, master, queue, 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "Failed rc %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_get_queue_free_size(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	uint32_t read_val = 0;
+	uint32_t reg_offset = master * 0x200 + queue * 0x100;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	read_val = cam_io_r_mb(base +
+		CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR %d max %d", read_val,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size);
+	return ((cci_dev->cci_i2c_queue_info[master][queue].max_queue_size) -
+			read_val);
+}
+
+static void cam_cci_process_half_q(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	unsigned long flags;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	uint32_t reg_val = 1 << ((master * 2) + queue);
+
+	spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 0) {
+		cam_cci_load_report_cmd(cci_dev, master, queue);
+		atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 1);
+		cam_io_w_mb(reg_val, base +
+			CCI_QUEUE_START_ADDR);
+	}
+	spin_unlock_irqrestore(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+}
+
+static int32_t cam_cci_process_full_q(struct cci_device *cci_dev,
+	enum cci_i2c_master_t master,
+	enum cci_i2c_queue_t queue)
+{
+	int32_t rc = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+	if (atomic_read(&cci_dev->cci_master_info[master].q_free[queue]) == 1) {
+		atomic_set(
+			&cci_dev->cci_master_info[master].done_pending[queue],
+			1);
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_wait(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+	} else {
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[master].lock_q[queue], flags);
+		rc = cam_cci_wait_report_cmd(cci_dev, master, queue);
+		if (rc < 0) {
+			CAM_ERR(CAM_CCI, "failed rc %d", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_calc_cmd_len(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl, uint32_t cmd_size,
+	 struct cam_sensor_i2c_reg_array *i2c_cmd, uint32_t *pack)
+{
+	uint8_t i;
+	uint32_t len = 0;
+	uint8_t data_len = 0, addr_len = 0;
+	uint8_t pack_max_len;
+	struct cam_sensor_i2c_reg_setting *msg;
+	struct cam_sensor_i2c_reg_array *cmd = i2c_cmd;
+	uint32_t size = cmd_size;
+
+	if (!cci_dev || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "failed");
+		return -EINVAL;
+	}
+
+	msg = &c_ctrl->cfg.cci_i2c_write_cfg;
+	*pack = 0;
+
+	if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ ||
+		c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) {
+		addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type);
+		len = (size + addr_len) <= (cci_dev->payload_size) ?
+			(size + addr_len):cci_dev->payload_size;
+	} else {
+		addr_len = cam_cci_convert_type_to_num_bytes(msg->addr_type);
+		data_len = cam_cci_convert_type_to_num_bytes(msg->data_type);
+		len = data_len + addr_len;
+		pack_max_len = size < (cci_dev->payload_size-len) ?
+			size : (cci_dev->payload_size-len);
+		for (i = 0; i < pack_max_len;) {
+			if (cmd->delay || ((cmd - i2c_cmd) >= (cmd_size - 1)))
+				break;
+			if (cmd->reg_addr + 1 ==
+				(cmd+1)->reg_addr) {
+				len += data_len;
+				if (len > cci_dev->payload_size) {
+					len = len - data_len;
+					break;
+				}
+				(*pack)++;
+			} else {
+				break;
+			}
+			i += data_len;
+			cmd++;
+		}
+	}
+
+	if (len > cci_dev->payload_size) {
+		CAM_ERR(CAM_CCI, "Len error: %d", len);
+		return -EINVAL;
+	}
+
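+	/*
+	 * Convert the payload length into 32-bit queue words: one extra
+	 * byte for the i2c WRITE command header, then round up. E.g. a
+	 * 2-byte addr + 2-byte data payload is 4 + 1 = 5 bytes, giving
+	 * 5/4 + 1 = 2 queue words.
+	 */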
+	len += 1; /* add i2c WR command */
+	len = len/4 + 1;
+
+	return len;
+}
+
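+/*
+ * Despite the _ms suffix, this returns cycles-per-microsecond in 8.8
+ * fixed point (scaled by 256), which is why the WAIT command math in
+ * cam_cci_data_queue() divides the product by 0x100. E.g. a 19.2 MHz
+ * clock yields (19200 * 256) / 1000 = 4915, i.e. ~19.2 << 8.
+ */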
+static uint32_t cam_cci_cycles_per_ms(unsigned long clk)
+{
+	uint32_t cycles_per_us;
+
+	if (clk) {
+		cycles_per_us = ((clk/1000)*256)/1000;
+	} else {
+		CAM_ERR(CAM_CCI, "failed: Can use default: %d",
+			CYCLES_PER_MICRO_SEC_DEFAULT);
+		cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+	}
+
+	return cycles_per_us;
+}
+
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t src_clk_idx, j;
+	uint32_t cci_clk_src;
+	unsigned long clk;
+	struct cam_cci_clk_params_t *clk_params = NULL;
+
+	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+	struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
+
+	if (i2c_freq_mode >= I2C_MAX_MODES ||
+		i2c_freq_mode < I2C_STANDARD_MODE) {
+		CAM_ERR(CAM_CCI, "Invalid frequency mode: %d",
+			(int32_t)i2c_freq_mode);
+		cci_dev->clk_level_index = -1;
+		return;
+	}
+
+	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+	cci_clk_src = clk_params->cci_clk_src;
+
+	src_clk_idx = soc_info->src_clk_idx;
+
+	if (src_clk_idx < 0) {
+		cci_dev->cycles_per_us = CYCLES_PER_MICRO_SEC_DEFAULT;
+		cci_dev->clk_level_index = 0;
+		return;
+	}
+
+	if (cci_clk_src == 0) {
+		clk = soc_info->clk_rate[0][src_clk_idx];
+		cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
+		cci_dev->clk_level_index = 0;
+		return;
+	}
+
+	for (j = 0; j < CAM_MAX_VOTE; j++) {
+		clk = soc_info->clk_rate[j][src_clk_idx];
+		if (clk == cci_clk_src) {
+			cci_dev->cycles_per_us = cam_cci_cycles_per_ms(clk);
+			cci_dev->clk_level_index = j;
+			return;
+		}
+	}
+}
+
+static int32_t cam_cci_set_clk_param(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	struct cam_cci_clk_params_t *clk_params = NULL;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	enum i2c_freq_mode i2c_freq_mode = c_ctrl->cci_info->i2c_freq_mode;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+
+	if ((i2c_freq_mode >= I2C_MAX_MODES) || (i2c_freq_mode < 0)) {
+		CAM_ERR(CAM_CCI, "invalid i2c_freq_mode = %d", i2c_freq_mode);
+		return -EINVAL;
+	}
+
+	clk_params = &cci_dev->cci_clk_params[i2c_freq_mode];
+
+	if (cci_dev->i2c_freq_mode[master] == i2c_freq_mode)
+		return 0;
+	if (master == MASTER_0) {
+		cam_io_w_mb(clk_params->hw_thigh << 16 |
+			clk_params->hw_tlow,
+			base + CCI_I2C_M0_SCL_CTL_ADDR);
+		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
+			clk_params->hw_tsu_sta,
+			base + CCI_I2C_M0_SDA_CTL_0_ADDR);
+		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
+			clk_params->hw_thd_sta,
+			base + CCI_I2C_M0_SDA_CTL_1_ADDR);
+		cam_io_w_mb(clk_params->hw_tbuf,
+			base + CCI_I2C_M0_SDA_CTL_2_ADDR);
+		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+			base + CCI_I2C_M0_MISC_CTL_ADDR);
+	} else if (master == MASTER_1) {
+		cam_io_w_mb(clk_params->hw_thigh << 16 |
+			clk_params->hw_tlow,
+			base + CCI_I2C_M1_SCL_CTL_ADDR);
+		cam_io_w_mb(clk_params->hw_tsu_sto << 16 |
+			clk_params->hw_tsu_sta,
+			base + CCI_I2C_M1_SDA_CTL_0_ADDR);
+		cam_io_w_mb(clk_params->hw_thd_dat << 16 |
+			clk_params->hw_thd_sta,
+			base + CCI_I2C_M1_SDA_CTL_1_ADDR);
+		cam_io_w_mb(clk_params->hw_tbuf,
+			base + CCI_I2C_M1_SDA_CTL_2_ADDR);
+		cam_io_w_mb(clk_params->hw_scl_stretch_en << 8 |
+			clk_params->hw_trdhld << 4 | clk_params->hw_tsp,
+			base + CCI_I2C_M1_MISC_CTL_ADDR);
+	}
+	cci_dev->i2c_freq_mode[master] = i2c_freq_mode;
+
+	return 0;
+}
+
+static int32_t cam_cci_data_queue(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	uint16_t i = 0, j = 0, k = 0, h = 0, len = 0;
+	int32_t rc = 0, free_size = 0, en_seq_write = 0;
+	uint8_t data[12];
+	struct cam_sensor_i2c_reg_setting *i2c_msg =
+		&c_ctrl->cfg.cci_i2c_write_cfg;
+	struct cam_sensor_i2c_reg_array *i2c_cmd = i2c_msg->reg_setting;
+	enum cci_i2c_master_t master = c_ctrl->cci_info->cci_i2c_master;
+	uint16_t reg_addr = 0, cmd_size = i2c_msg->size;
+	uint32_t read_val = 0, reg_offset, val, delay = 0;
+	uint32_t max_queue_size, queue_size = 0, cmd = 0;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	unsigned long flags;
+
+	if (i2c_cmd == NULL) {
+		CAM_ERR(CAM_CCI, "Failed: i2c cmd is NULL");
+		return -EINVAL;
+	}
+
+	if ((!cmd_size) || (cmd_size > CCI_I2C_MAX_WRITE)) {
+		CAM_ERR(CAM_CCI, "failed: invalid cmd_size %d",
+			cmd_size);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CCI, "addr type %d data type %d cmd_size %d",
+		i2c_msg->addr_type, i2c_msg->data_type, cmd_size);
+
+	if (i2c_msg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed: invalid addr_type 0x%X",
+			i2c_msg->addr_type);
+		return -EINVAL;
+	}
+	if (i2c_msg->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed: invalid data_type 0x%X",
+			i2c_msg->data_type);
+		return -EINVAL;
+	}
+	reg_offset = master * 0x200 + queue * 0x100;
+
+	cam_io_w_mb(cci_dev->cci_wait_sync_cfg.cid,
+		base + CCI_SET_CID_SYNC_TIMER_ADDR +
+		cci_dev->cci_wait_sync_cfg.csid *
+		CCI_SET_CID_SYNC_TIMER_OFFSET);
+
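+	/* SET_PARAM word layout, per the shifts below: slave address at
+	 * bit 4, retry count at bit 16, id_map flag at bit 18.
+	 */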
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+
+	CAM_DBG(CAM_CCI, "CCI_I2C_M0_Q0_LOAD_DATA_ADDR:val 0x%x:0x%x",
+		CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset, val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+		reg_offset);
+
+	spin_lock_irqsave(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+	atomic_set(&cci_dev->cci_master_info[master].q_free[queue], 0);
+	spin_unlock_irqrestore(&cci_dev->cci_master_info[master].lock_q[queue],
+		flags);
+
+	max_queue_size =
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size;
+
+	if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ)
+		queue_size = max_queue_size;
+	else
+		queue_size = max_queue_size/2;
+	reg_addr = i2c_cmd->reg_addr;
+
+	if (sync_en == MSM_SYNC_ENABLE && cci_dev->valid_sync &&
+		cmd_size < max_queue_size) {
+		val = CCI_I2C_WAIT_SYNC_CMD |
+			((cci_dev->cci_wait_sync_cfg.line) << 4);
+		cam_io_w_mb(val,
+			base + CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+			reg_offset);
+	}
+
+	rc = cam_cci_lock_queue(cci_dev, master, queue, 1);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "failed line %d", rc);
+		return rc;
+	}
+
+	while (cmd_size) {
+		uint32_t pack = 0;
+		int32_t cmd_len;
+
+		/* Use a signed local: a -EINVAL return from
+		 * cam_cci_calc_cmd_len() would be truncated to a large
+		 * positive value in the uint16_t len.
+		 */
+		cmd_len = cam_cci_calc_cmd_len(cci_dev, c_ctrl, cmd_size,
+			i2c_cmd, &pack);
+		if (cmd_len <= 0) {
+			CAM_ERR(CAM_CCI, "failed to calc cmd len, rc: %d",
+				cmd_len);
+			return -EINVAL;
+		}
+		len = cmd_len;
+
+		read_val = cam_io_r_mb(base +
+			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+		CAM_DBG(CAM_CCI, "CUR_WORD_CNT_ADDR %d len %d max %d",
+			read_val, len, max_queue_size);
+		/* + 1 - space allocation for the Report CMD */
+		if ((read_val + len + 1) > queue_size) {
+			if ((read_val + len + 1) > max_queue_size) {
+				rc = cam_cci_process_full_q(cci_dev,
+					master, queue);
+				if (rc < 0) {
+					CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+					return rc;
+				}
+				continue;
+			}
+			cam_cci_process_half_q(cci_dev, master, queue);
+		}
+
+		CAM_DBG(CAM_CCI, "cmd_size %d addr 0x%x data 0x%x",
+			cmd_size, i2c_cmd->reg_addr, i2c_cmd->reg_data);
+		delay = i2c_cmd->delay;
+		i = 0;
+		data[i++] = CCI_I2C_WRITE_CMD;
+
+		/*
+		 * In case of multiple commands:
+		 * MSM_CCI_I2C_WRITE: addresses are not contiguous, so update
+		 *	the address for each new packet.
+		 * MSM_CCI_I2C_WRITE_SEQ: addresses are contiguous, so keep
+		 *	the incremented address for the next packet.
+		 */
+		if (c_ctrl->cmd == MSM_CCI_I2C_WRITE ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_ASYNC ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_SYNC_BLOCK)
+			reg_addr = i2c_cmd->reg_addr;
+
+		if (en_seq_write == 0) {
+			/* either byte or word addr */
+			if (i2c_msg->addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+				data[i++] = reg_addr;
+			else {
+				data[i++] = (reg_addr & 0xFF00) >> 8;
+				data[i++] = reg_addr & 0x00FF;
+			}
+		}
+		/* max of 10 data bytes */
+		do {
+			if (i2c_msg->data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+				data[i++] = i2c_cmd->reg_data;
+				if (c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ)
+					reg_addr++;
+			} else {
+				if ((i + 1) <= cci_dev->payload_size) {
+					switch (i2c_msg->data_type) {
+					case CAMERA_SENSOR_I2C_TYPE_DWORD:
+						data[i++] = (i2c_cmd->reg_data &
+							0xFF000000) >> 24;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_3B:
+						data[i++] = (i2c_cmd->reg_data &
+							0x00FF0000) >> 16;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_WORD:
+						data[i++] = (i2c_cmd->reg_data &
+							0x0000FF00) >> 8;
+						/* fallthrough */
+					case CAMERA_SENSOR_I2C_TYPE_BYTE:
+						data[i++] = i2c_cmd->reg_data &
+							0x000000FF;
+						break;
+					default:
+						CAM_ERR(CAM_CCI,
+							"invalid data type: %d",
+							i2c_msg->data_type);
+						return -EINVAL;
+					}
+
+					if (c_ctrl->cmd ==
+						MSM_CCI_I2C_WRITE_SEQ)
+						reg_addr++;
+				} else
+					break;
+			}
+			i2c_cmd++;
+			--cmd_size;
+		} while (((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) || pack--) &&
+				(cmd_size > 0) && (i <= cci_dev->payload_size));
+		free_size = cam_cci_get_queue_free_size(cci_dev, master,
+				queue);
+		if ((c_ctrl->cmd == MSM_CCI_I2C_WRITE_SEQ ||
+			c_ctrl->cmd == MSM_CCI_I2C_WRITE_BURST) &&
+			((i-1) == MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11) &&
+			cci_dev->support_seq_write && cmd_size > 0 &&
+			free_size > BURST_MIN_FREE_SIZE) {
+			data[0] |= 0xF0;
+			en_seq_write = 1;
+		} else {
+			data[0] |= ((i-1) << 4);
+			en_seq_write = 0;
+		}
+		len = ((i-1)/4) + 1;
+
+		read_val = cam_io_r_mb(base +
+			CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR + reg_offset);
+		for (h = 0, k = 0; h < len; h++) {
+			cmd = 0;
+			for (j = 0; (j < 4 && k < i); j++)
+				cmd |= (data[k++] << (j * 8));
+			CAM_DBG(CAM_CCI,
+				"LOAD_DATA_ADDR 0x%x, q: %d, len:%d, cnt: %d",
+				cmd, queue, len, read_val);
+			cam_io_w_mb(cmd, base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+
+			read_val += 1;
+			cam_io_w_mb(read_val, base +
+				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		}
+
+		if ((delay > 0) && (delay < CCI_MAX_DELAY) &&
+			en_seq_write == 0) {
+			cmd = (uint32_t)((delay * cci_dev->cycles_per_us) /
+				0x100);
+			cmd <<= 4;
+			cmd |= CCI_I2C_WAIT_CMD;
+			CAM_DBG(CAM_CCI,
+				"CCI_I2C_M0_Q0_LOAD_DATA_ADDR 0x%x", cmd);
+			cam_io_w_mb(cmd, base +
+				CCI_I2C_M0_Q0_LOAD_DATA_ADDR +
+				master * 0x200 + queue * 0x100);
+			read_val += 1;
+			cam_io_w_mb(read_val, base +
+				CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR + reg_offset);
+		}
+	}
+
+	rc = cam_cci_transfer_end(cci_dev, master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "failed rc %d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_burst_read(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0, i = 0, j = 0, irq_mask_update = 0;
+	unsigned long rem_jiffies, flags;
+	int32_t read_words = 0, exp_words = 0;
+	int32_t index = 0, first_byte = 0, total_read_words = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device                  *cci_dev = NULL;
+	struct cam_cci_read_cfg            *read_cfg = NULL;
+	struct cam_hw_soc_info             *soc_info = NULL;
+	void __iomem                       *base = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	/*
+	 * TODO: if the frequency of operation changes, wait for the
+	 * previous transaction to complete.
+	 */
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		goto rel_mutex;
+	}
+
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_read call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validataion failed rc %d", rc);
+		goto rel_mutex;
+	}
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	if (read_cfg->data == NULL) {
+		CAM_ERR(CAM_CCI, "Data ptr is NULL");
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+			read_cfg->addr_type);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
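+	/* Pack the register address big-endian above the command byte:
+	 * address byte i (LSB first) lands at byte (addr_type - i), so a
+	 * 2-byte addr 0x1234 is sent as 0x12 then 0x34.
+	 */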
+	val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+	for (i = 0; i < read_cfg->addr_type; i++) {
+		val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+			((read_cfg->addr_type - i) << 3);
+	}
+
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+	CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+
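+	/* The first byte read back is the slave id (discarded via
+	 * first_byte below), hence num_byte / 4 + 1 expected words.
+	 */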
+	exp_words = ((read_cfg->num_byte / 4) + 1);
+	CAM_DBG(CAM_CCI, "waiting for threshold [exp_words %d]", exp_words);
+
+	while (total_read_words != exp_words) {
+		rem_jiffies = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].th_complete,
+			CCI_TIMEOUT);
+		if (!rem_jiffies) {
+			rc = -ETIMEDOUT;
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+				master * 0x100);
+			CAM_ERR(CAM_CCI,
+				"wait_for_completion_timeout rc = %d FIFO buf_lvl:0x%x",
+				rc, val);
+#ifdef DUMP_CCI_REGISTERS
+			cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+			cam_cci_flush_queue(cci_dev, master);
+			goto rel_mutex;
+		}
+
+		read_words = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		if (read_words <= 0) {
+			CAM_DBG(CAM_CCI, "FIFO Buffer lvl is 0");
+			continue;
+		}
+
+		j++;
+		CAM_DBG(CAM_CCI, "Iteration: %u read_words %d", j, read_words);
+
+		total_read_words += read_words;
+		while (read_words > 0) {
+			val = cam_io_r_mb(base +
+				CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+			for (i = 0; (i < 4) &&
+				(index < read_cfg->num_byte); i++) {
+				CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
+				if (!first_byte) {
+					CAM_DBG(CAM_CCI, "sid 0x%x",
+						val & 0xFF);
+					first_byte++;
+				} else {
+					read_cfg->data[index] =
+						(val  >> (i * 8)) & 0xFF;
+					CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
+						read_cfg->data[index]);
+					index++;
+				}
+			}
+			read_words--;
+		}
+
+		CAM_DBG(CAM_CCI, "Iteraion:%u total_read_words %d",
+			j, total_read_words);
+
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		if (cci_dev->irqs_disabled) {
+			irq_mask_update =
+				cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR) |
+				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+			if (master == MASTER_0 && cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)
+				irq_mask_update |=
+					CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+			else if (master == MASTER_1 && cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)
+				irq_mask_update |=
+					CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+			cam_io_w_mb(irq_mask_update,
+				base + CCI_IRQ_MASK_1_ADDR);
+		}
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+
+		if (total_read_words == exp_words) {
+			/*
+			 * This wait is for the RD_DONE irq: when RD_DONE
+			 * triggers, complete() is called on both the
+			 * threshold and read-done waits. The threshold wait
+			 * already drains the entire buffer, so this wait
+			 * compensates for the complete() invoked for
+			 * RD_DONE exclusively.
+			 */
+			rem_jiffies = wait_for_completion_timeout(
+				&cci_dev->cci_master_info[master].reset_complete,
+				CCI_TIMEOUT);
+			if (!rem_jiffies) {
+				rc = -ETIMEDOUT;
+				val = cam_io_r_mb(base +
+					CCI_I2C_M0_READ_BUF_LEVEL_ADDR +
+					master * 0x100);
+				CAM_ERR(CAM_CCI,
+					"Failed to receive RD_DONE irq rc = %d FIFO buf_lvl:0x%x",
+					rc, val);
+#ifdef DUMP_CCI_REGISTERS
+				cam_cci_dump_registers(cci_dev,
+					master, queue);
+#endif
+				cam_cci_flush_queue(cci_dev, master);
+				goto rel_mutex;
+			}
+			break;
+		}
+	}
+
+	CAM_DBG(CAM_CCI, "Burst read successful words_read %d",
+		total_read_words);
+
+rel_mutex:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+	return rc;
+}
+
+static int32_t cam_cci_read(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	int32_t read_words = 0, exp_words = 0;
+	int32_t index = 0, first_byte = 0;
+	uint32_t i = 0;
+	enum cci_i2c_master_t master;
+	enum cci_i2c_queue_t queue = QUEUE_1;
+	struct cci_device *cci_dev = NULL;
+	struct cam_cci_read_cfg *read_cfg = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+	void __iomem *base = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
+	mutex_lock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+
+	/*
+	 * TODO: if the frequency of operation changes, wait for the
+	 * previous transaction to complete.
+	 */
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		goto rel_mutex;
+	}
+
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_read call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size - 1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validataion failed rc %d", rc);
+		goto rel_mutex;
+	}
+
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	if (read_cfg->data == NULL) {
+		CAM_ERR(CAM_CCI, "Data ptr is NULL");
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	CAM_DBG(CAM_CCI, "master %d, queue %d", master, queue);
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+	val = CCI_I2C_SET_PARAM_CMD | c_ctrl->cci_info->sid << 4 |
+		c_ctrl->cci_info->retries << 16 |
+		c_ctrl->cci_info->id_map << 18;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_LOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	if (read_cfg->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_CCI, "failed : Invalid addr type: %u",
+			read_cfg->addr_type);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+	for (i = 0; i < read_cfg->addr_type; i++) {
+		val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+			((read_cfg->addr_type - i) << 3);
+	}
+
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_READ_CMD | (read_cfg->num_byte << 4);
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = CCI_I2C_UNLOCK_CMD;
+	rc = cam_cci_write_i2c_queue(cci_dev, val, master, queue);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "failed rc: %d", rc);
+		goto rel_mutex;
+	}
+
+	val = cam_io_r_mb(base + CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+	CAM_DBG(CAM_CCI, "cur word cnt 0x%x", val);
+	cam_io_w_mb(val, base + CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR
+			+ master * 0x200 + queue * 0x100);
+
+	val = 1 << ((master * 2) + queue);
+	cam_io_w_mb(val, base + CCI_QUEUE_START_ADDR);
+	exp_words = ((read_cfg->num_byte / 4) + 1);
+	CAM_DBG(CAM_CCI,
+		"waiting for rd_done [exp_words: %d]", exp_words);
+
+	rc = wait_for_completion_timeout(
+		&cci_dev->cci_master_info[master].reset_complete, CCI_TIMEOUT);
+	if (rc <= 0) {
+#ifdef DUMP_CCI_REGISTERS
+		cam_cci_dump_registers(cci_dev, master, queue);
+#endif
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+		val = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+		CAM_ERR(CAM_CCI,
+			"wait_for_completion_timeout rc = %d FIFO buf_lvl: 0x%x",
+			rc, val);
+		cam_cci_flush_queue(cci_dev, master);
+		goto rel_mutex;
+	} else {
+		rc = 0;
+	}
+
+	read_words = cam_io_r_mb(base +
+		CCI_I2C_M0_READ_BUF_LEVEL_ADDR + master * 0x100);
+	if (read_words != exp_words) {
+		CAM_ERR(CAM_CCI, "read_words = %d, exp words = %d",
+			read_words, exp_words);
+		memset(read_cfg->data, 0, read_cfg->num_byte);
+		rc = -EINVAL;
+		goto rel_mutex;
+	}
+	index = 0;
+	CAM_DBG(CAM_CCI, "index %d num_type %d", index, read_cfg->num_byte);
+	first_byte = 0;
+	while (read_words > 0) {
+		val = cam_io_r_mb(base +
+			CCI_I2C_M0_READ_DATA_ADDR + master * 0x100);
+		CAM_DBG(CAM_CCI, "read val 0x%x", val);
+		for (i = 0; (i < 4) && (index < read_cfg->num_byte); i++) {
+			CAM_DBG(CAM_CCI, "i:%d index:%d", i, index);
+			if (!first_byte) {
+				CAM_DBG(CAM_CCI, "sid 0x%x", val & 0xFF);
+				first_byte++;
+			} else {
+				read_cfg->data[index] =
+					(val  >> (i * 8)) & 0xFF;
+				CAM_DBG(CAM_CCI, "data[%d] 0x%x", index,
+					read_cfg->data[index]);
+				index++;
+			}
+		}
+		read_words--;
+	}
+rel_mutex:
+	mutex_unlock(&cci_dev->cci_master_info[master].mutex_q[queue]);
+	return rc;
+}
+
+static int32_t cam_cci_i2c_write(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+		CAM_ERR(CAM_CCI, "invalid cci state %d",
+			cci_dev->cci_state);
+		return -EINVAL;
+	}
+	master = c_ctrl->cci_info->cci_i2c_master;
+	CAM_DBG(CAM_CCI, "set param sid 0x%x retries %d id_map %d",
+		c_ctrl->cci_info->sid, c_ctrl->cci_info->retries,
+		c_ctrl->cci_info->id_map);
+
+	/* Set the I2C Frequency */
+	rc = cam_cci_set_clk_param(cci_dev, c_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "cam_cci_set_clk_param failed rc = %d", rc);
+		return rc;
+	}
+	/*
+	 * Call validate queue to make sure queue is empty before starting.
+	 * If this call fails, don't proceed with i2c_write call. This is to
+	 * avoid overflow / underflow of queue
+	 */
+	rc = cam_cci_validate_queue(cci_dev,
+		cci_dev->cci_i2c_queue_info[master][queue].max_queue_size-1,
+		master, queue);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Initial validataion failed rc %d",
+			rc);
+		return rc;
+	}
+	if (c_ctrl->cci_info->retries > CCI_I2C_READ_MAX_RETRIES) {
+		CAM_ERR(CAM_CCI, "More than max retries");
+		return rc;
+	}
+	rc = cam_cci_data_queue(cci_dev, c_ctrl, queue, sync_en);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static void cam_cci_write_async_helper(struct work_struct *work)
+{
+	int rc;
+	struct cci_device *cci_dev;
+	struct cci_write_async *write_async =
+		container_of(work, struct cci_write_async, work);
+	struct cam_sensor_i2c_reg_setting *i2c_msg;
+	enum cci_i2c_master_t master;
+	struct cam_cci_master_info *cci_master_info;
+
+	cci_dev = write_async->cci_dev;
+	i2c_msg = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+	master = write_async->c_ctrl.cci_info->cci_i2c_master;
+	cci_master_info = &cci_dev->cci_master_info[master];
+
+	mutex_lock(&cci_master_info->mutex_q[write_async->queue]);
+	rc = cam_cci_i2c_write(&(cci_dev->v4l2_dev_str.sd),
+		&write_async->c_ctrl, write_async->queue, write_async->sync_en);
+	mutex_unlock(&cci_master_info->mutex_q[write_async->queue]);
+	if (rc < 0)
+		CAM_ERR(CAM_CCI, "failed rc: %d", rc);
+
+	kfree(write_async->c_ctrl.cfg.cci_i2c_write_cfg.reg_setting);
+	kfree(write_async);
+}
+
+static int32_t cam_cci_i2c_write_async(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl, enum cci_i2c_queue_t queue,
+	enum cci_i2c_sync sync_en)
+{
+	int32_t rc = 0;
+	struct cci_write_async *write_async;
+	struct cci_device *cci_dev;
+	struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg;
+	struct cam_sensor_i2c_reg_setting *cci_i2c_write_cfg_w;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	write_async = kzalloc(sizeof(*write_async), GFP_KERNEL);
+	if (!write_async)
+		return -ENOMEM;
+
+	INIT_WORK(&write_async->work, cam_cci_write_async_helper);
+	write_async->cci_dev = cci_dev;
+	write_async->c_ctrl = *c_ctrl;
+	write_async->queue = queue;
+	write_async->sync_en = sync_en;
+
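+	/* Deep-copy the register settings: the caller's buffer may not
+	 * outlive this call, and the async helper kfree()s this copy.
+	 */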
+	cci_i2c_write_cfg = &c_ctrl->cfg.cci_i2c_write_cfg;
+	cci_i2c_write_cfg_w = &write_async->c_ctrl.cfg.cci_i2c_write_cfg;
+
+	if (cci_i2c_write_cfg->size == 0) {
+		kfree(write_async);
+		return -EINVAL;
+	}
+
+	cci_i2c_write_cfg_w->reg_setting =
+		kzalloc(sizeof(struct cam_sensor_i2c_reg_array)*
+		cci_i2c_write_cfg->size, GFP_KERNEL);
+	if (!cci_i2c_write_cfg_w->reg_setting) {
+		CAM_ERR(CAM_CCI, "Couldn't allocate memory");
+		kfree(write_async);
+		return -ENOMEM;
+	}
+	memcpy(cci_i2c_write_cfg_w->reg_setting,
+		cci_i2c_write_cfg->reg_setting,
+		(sizeof(struct cam_sensor_i2c_reg_array)*
+						cci_i2c_write_cfg->size));
+
+	cci_i2c_write_cfg_w->addr_type = cci_i2c_write_cfg->addr_type;
+	cci_i2c_write_cfg_w->data_type = cci_i2c_write_cfg->data_type;
+	cci_i2c_write_cfg_w->size = cci_i2c_write_cfg->size;
+	cci_i2c_write_cfg_w->delay = cci_i2c_write_cfg->delay;
+
+	queue_work(cci_dev->write_wq[write_async->queue], &write_async->work);
+
+	return rc;
+}
+
+static int32_t cam_cci_read_bytes(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev = NULL;
+	enum cci_i2c_master_t master;
+	struct cam_cci_read_cfg *read_cfg = NULL;
+	uint16_t read_bytes = 0;
+
+	if (!sd || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "sd %pK c_ctrl %pK", sd, c_ctrl);
+		return -EINVAL;
+	}
+	if (!c_ctrl->cci_info) {
+		CAM_ERR(CAM_CCI, "cci_info NULL");
+		return -EINVAL;
+	}
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev) {
+		CAM_ERR(CAM_CCI, "cci_dev NULL");
+		return -EINVAL;
+	}
+	if (cci_dev->cci_state != CCI_STATE_ENABLED) {
+		CAM_ERR(CAM_CCI, "invalid cci state %d", cci_dev->cci_state);
+		return -EINVAL;
+	}
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+			|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	master = c_ctrl->cci_info->cci_i2c_master;
+	read_cfg = &c_ctrl->cfg.cci_i2c_read_cfg;
+	if ((!read_cfg->num_byte) || (read_cfg->num_byte > CCI_I2C_MAX_READ)) {
+		CAM_ERR(CAM_CCI, "Invalid read num bytes %d",
+			read_cfg->num_byte);
+		rc = -EINVAL;
+		goto ERROR;
+	}
+
+	read_bytes = read_cfg->num_byte;
+
+	/*
+	 * To avoid any conflicts due to back to back trigger of
+	 * THRESHOLD irq's, we reinit the threshold wait before
+	 * we load the burst read cmd.
+	 */
+	reinit_completion(&cci_dev->cci_master_info[master].th_complete);
+
+	CAM_DBG(CAM_CCI, "Bytes to read %u", read_bytes);
+	do {
+		if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT)
+			read_cfg->num_byte = CCI_I2C_MAX_BYTE_COUNT;
+		else
+			read_cfg->num_byte = read_bytes;
+
+		if (read_cfg->num_byte >= CCI_READ_MAX) {
+			cci_dev->is_burst_read = true;
+			rc = cam_cci_burst_read(sd, c_ctrl);
+		} else {
+			cci_dev->is_burst_read = false;
+			rc = cam_cci_read(sd, c_ctrl);
+		}
+		if (rc) {
+			CAM_ERR(CAM_CCI, "failed to read rc:%d", rc);
+			goto ERROR;
+		}
+
+		if (read_bytes >= CCI_I2C_MAX_BYTE_COUNT) {
+			read_cfg->addr += (CCI_I2C_MAX_BYTE_COUNT /
+				read_cfg->data_type);
+			read_cfg->data += CCI_I2C_MAX_BYTE_COUNT;
+			read_bytes -= CCI_I2C_MAX_BYTE_COUNT;
+		} else {
+			read_bytes = 0;
+		}
+	} while (read_bytes);
+
+ERROR:
+	cci_dev->is_burst_read = false;
+	return rc;
+}
+
+static int32_t cam_cci_i2c_set_sync_prms(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
+		rc = -EINVAL;
+		return rc;
+	}
+	cci_dev->cci_wait_sync_cfg = c_ctrl->cfg.cci_wait_sync_cfg;
+	cci_dev->valid_sync = cci_dev->cci_wait_sync_cfg.csid < 0 ? 0 : 1;
+
+	return rc;
+}
+
+static int32_t cam_cci_release(struct v4l2_subdev *sd)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+
+	rc = cam_cci_soc_release(cci_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Failed in releasing the cci: %d", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+static int32_t cam_cci_write(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master;
+	struct cam_cci_master_info *cci_master_info;
+	uint32_t i;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	master = c_ctrl->cci_info->cci_i2c_master;
+
+	if (c_ctrl->cci_info->cci_i2c_master >= MASTER_MAX
+		|| c_ctrl->cci_info->cci_i2c_master < 0) {
+		CAM_ERR(CAM_CCI, "Invalid I2C master addr");
+		return -EINVAL;
+	}
+
+	cci_master_info = &cci_dev->cci_master_info[master];
+
+	switch (c_ctrl->cmd) {
+	case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+		mutex_lock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+		rc = cam_cci_i2c_write(sd, c_ctrl,
+			SYNC_QUEUE, MSM_SYNC_ENABLE);
+		mutex_unlock(&cci_master_info->mutex_q[SYNC_QUEUE]);
+		break;
+	case MSM_CCI_I2C_WRITE_SYNC:
+		rc = cam_cci_i2c_write_async(sd, c_ctrl,
+			SYNC_QUEUE, MSM_SYNC_ENABLE);
+		break;
+	case MSM_CCI_I2C_WRITE:
+	case MSM_CCI_I2C_WRITE_SEQ:
+	case MSM_CCI_I2C_WRITE_BURST:
+		for (i = 0; i < NUM_QUEUES; i++) {
+			if (mutex_trylock(&cci_master_info->mutex_q[i])) {
+				rc = cam_cci_i2c_write(sd, c_ctrl, i,
+					MSM_SYNC_DISABLE);
+				mutex_unlock(&cci_master_info->mutex_q[i]);
+				return rc;
+			}
+		}
+		mutex_lock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+		rc = cam_cci_i2c_write(sd, c_ctrl,
+			PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+		mutex_unlock(&cci_master_info->mutex_q[PRIORITY_QUEUE]);
+		break;
+	case MSM_CCI_I2C_WRITE_ASYNC:
+		rc = cam_cci_i2c_write_async(sd, c_ctrl,
+			PRIORITY_QUEUE, MSM_SYNC_DISABLE);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *cci_ctrl)
+{
+	int32_t rc = 0;
+
+	CAM_DBG(CAM_CCI, "cmd %d", cci_ctrl->cmd);
+	switch (cci_ctrl->cmd) {
+	case MSM_CCI_INIT:
+		rc = cam_cci_init(sd, cci_ctrl);
+		break;
+	case MSM_CCI_RELEASE:
+		rc = cam_cci_release(sd);
+		break;
+	case MSM_CCI_I2C_READ:
+		rc = cam_cci_read_bytes(sd, cci_ctrl);
+		break;
+	case MSM_CCI_I2C_WRITE:
+	case MSM_CCI_I2C_WRITE_SEQ:
+	case MSM_CCI_I2C_WRITE_BURST:
+	case MSM_CCI_I2C_WRITE_SYNC:
+	case MSM_CCI_I2C_WRITE_ASYNC:
+	case MSM_CCI_I2C_WRITE_SYNC_BLOCK:
+		rc = cam_cci_write(sd, cci_ctrl);
+		break;
+	case MSM_CCI_GPIO_WRITE:
+		break;
+	case MSM_CCI_SET_SYNC_CID:
+		rc = cam_cci_i2c_set_sync_prms(sd, cci_ctrl);
+		break;
+
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+
+	cci_ctrl->status = rc;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
new file mode 100644
index 0000000..5d5a66d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_core.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_CCI_CORE_H_
+#define _CAM_CCI_CORE_H_
+
+#include <linux/irqreturn.h>
+#include <media/cam_sensor.h>
+#include "cam_cci_dev.h"
+#include "cam_cci_soc.h"
+
+/**
+ * cam_cci_get_clk_rates - get CCI clk rates
+ * @cci_dev: CCI device structure
+ * @c_ctrl:  CCI control structure
+ *
+ * This API gets CCI clk rates
+ */
+void cam_cci_get_clk_rates(struct cci_device *cci_dev,
+	struct cam_cci_ctrl *c_ctrl);
+
+/**
+ * cam_cci_core_cfg - handle CCI control commands
+ * @sd:       V4L2 sub device
+ * @cci_ctrl: CCI control structure
+ *
+ * This API handles I2C operations for CCI
+ */
+int32_t cam_cci_core_cfg(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *cci_ctrl);
+
+/**
+ * cam_cci_irq - CCI IRQ handler
+ * @irq_num: IRQ number
+ * @data:    CCI private structure
+ *
+ * This API handles CCI IRQs
+ */
+irqreturn_t cam_cci_irq(int irq_num, void *data);
+
+#endif /* _CAM_CCI_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
new file mode 100644
index 0000000..6b894db
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_cci_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_cci_soc.h"
+#include "cam_cci_core.h"
+
+#define CCI_MAX_DELAY 1000000
+
+static struct v4l2_subdev *g_cci_subdev[MAX_CCI];
+
+struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index)
+{
+	if (cci_dev_index >= 0 && cci_dev_index < MAX_CCI)
+		return g_cci_subdev[cci_dev_index];
+	return NULL;
+}
+
+static long cam_cci_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int32_t rc = 0;
+
+	if (arg == NULL) {
+		CAM_ERR(CAM_CCI, "Invalid Args");
+		return rc;
+	}
+
+	switch (cmd) {
+	case VIDIOC_MSM_CCI_CFG:
+		rc = cam_cci_core_cfg(sd, arg);
+		break;
+	case VIDIOC_CAM_CONTROL:
+		break;
+	default:
+		CAM_ERR(CAM_CCI, "Invalid ioctl cmd: %d", cmd);
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cci_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	return cam_cci_subdev_ioctl(sd, cmd, NULL);
+}
+#endif
+
+irqreturn_t cam_cci_irq(int irq_num, void *data)
+{
+	uint32_t irq_status0, irq_status1, reg_bmsk;
+	uint32_t irq_update_rd_done = 0;
+	struct cci_device *cci_dev = data;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+	void __iomem *base = soc_info->reg_map[0].mem_base;
+	unsigned long flags;
+	bool rd_done_th_assert = false;
+
+	irq_status0 = cam_io_r_mb(base + CCI_IRQ_STATUS_0_ADDR);
+	irq_status1 = cam_io_r_mb(base + CCI_IRQ_STATUS_1_ADDR);
+	CAM_DBG(CAM_CCI, "irq0:%x irq1:%x", irq_status0, irq_status1);
+
+	if (irq_status0 & CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK) {
+		if (cci_dev->cci_master_info[MASTER_0].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_0].reset_pending =
+				FALSE;
+			complete(
+			&cci_dev->cci_master_info[MASTER_0].reset_complete);
+		}
+		if (cci_dev->cci_master_info[MASTER_1].reset_pending == TRUE) {
+			cci_dev->cci_master_info[MASTER_1].reset_pending =
+				FALSE;
+			complete(
+			&cci_dev->cci_master_info[MASTER_1].reset_complete);
+		}
+	}
+
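+	/*
+	 * Read-path completions for MASTER_0: if RD_DONE and the read
+	 * threshold irq arrive together, both waiters are completed; a
+	 * lone RD_DONE completes reset_complete (and th_complete only for
+	 * burst reads); a lone threshold irq completes just th_complete.
+	 */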
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		rd_done_th_assert = true;
+		complete(&cci_dev->cci_master_info[MASTER_0].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	}
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		rd_done_th_assert = true;
+		if (cci_dev->is_burst_read)
+			complete(
+			&cci_dev->cci_master_info[MASTER_0].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_0].reset_complete);
+	}
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_0].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_0].th_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_0]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_0],
+			flags);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_0];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_1]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_0].lock_q[QUEUE_1],
+			flags);
+	}
+	rd_done_th_assert = false;
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		rd_done_th_assert = true;
+		complete(&cci_dev->cci_master_info[MASTER_1].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	}
+	if ((irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		rd_done_th_assert = true;
+		if (cci_dev->is_burst_read)
+			complete(
+			&cci_dev->cci_master_info[MASTER_1].th_complete);
+		complete(&cci_dev->cci_master_info[MASTER_1].reset_complete);
+	}
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+		(!rd_done_th_assert)) {
+		cci_dev->cci_master_info[MASTER_1].status = 0;
+		complete(&cci_dev->cci_master_info[MASTER_1].th_complete);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_0], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_0]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_0]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_0], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_0],
+			flags);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK) {
+		struct cam_cci_master_info *cci_master_info;
+
+		cci_master_info = &cci_dev->cci_master_info[MASTER_1];
+		spin_lock_irqsave(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
+			flags);
+		atomic_set(&cci_master_info->q_free[QUEUE_1], 0);
+		cci_master_info->status = 0;
+		if (atomic_read(&cci_master_info->done_pending[QUEUE_1]) == 1) {
+			complete(&cci_master_info->report_q[QUEUE_1]);
+			atomic_set(&cci_master_info->done_pending[QUEUE_1], 0);
+		}
+		spin_unlock_irqrestore(
+			&cci_dev->cci_master_info[MASTER_1].lock_q[QUEUE_1],
+			flags);
+	}
+	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE)
+		CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_0");
+
+	if (irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE)
+		CAM_DBG(CAM_CCI, "RD_PAUSE ON MASTER_1");
+
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+		cam_io_w_mb(CCI_M0_RESET_RMSK,
+			base + CCI_RESET_CMD_ADDR);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].reset_pending = TRUE;
+		cam_io_w_mb(CCI_M1_RESET_RMSK,
+			base + CCI_RESET_CMD_ADDR);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK) {
+		cci_dev->cci_master_info[MASTER_0].status = -EINVAL;
+		cam_io_w_mb(CCI_M0_HALT_REQ_RMSK,
+			base + CCI_HALT_REQ_ADDR);
+		CAM_DBG(CAM_CCI, "MASTER_0 error 0x%x", irq_status0);
+	}
+	if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK) {
+		cci_dev->cci_master_info[MASTER_1].status = -EINVAL;
+		cam_io_w_mb(CCI_M1_HALT_REQ_RMSK,
+			base + CCI_HALT_REQ_ADDR);
+		CAM_DBG(CAM_CCI, "MASTER_1 error 0x%x", irq_status0);
+	}
+
+	cam_io_w_mb(irq_status0, base + CCI_IRQ_CLEAR_0_ADDR);
+
+	reg_bmsk = CCI_IRQ_MASK_1_RMSK;
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) &&
+	!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK)) {
+		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		cci_dev->irqs_disabled |=
+			CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+	}
+
+	if ((irq_status1 & CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) &&
+	!(irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK)) {
+		reg_bmsk &= ~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+		spin_lock_irqsave(&cci_dev->lock_status, flags);
+		cci_dev->irqs_disabled |=
+			CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+		spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+	}
+
+	if (reg_bmsk != CCI_IRQ_MASK_1_RMSK) {
+		cam_io_w_mb(reg_bmsk, base + CCI_IRQ_MASK_1_ADDR);
+		CAM_DBG(CAM_CCI, "Updating the reg mask for irq1: 0x%x",
+			reg_bmsk);
+	} else if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK ||
+		irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK) {
+			spin_lock_irqsave(&cci_dev->lock_status, flags);
+			if (cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD) {
+				irq_update_rd_done |=
+					CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+				cci_dev->irqs_disabled &=
+					~CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD;
+			}
+			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+		}
+		if (irq_status0 & CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK) {
+			spin_lock_irqsave(&cci_dev->lock_status, flags);
+			if (cci_dev->irqs_disabled &
+				CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD) {
+				irq_update_rd_done |=
+					CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+				cci_dev->irqs_disabled &=
+					~CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD;
+			}
+			spin_unlock_irqrestore(&cci_dev->lock_status, flags);
+		}
+	}
+
+	if (irq_update_rd_done != 0) {
+		irq_update_rd_done |= cam_io_r_mb(base + CCI_IRQ_MASK_1_ADDR);
+		cam_io_w_mb(irq_update_rd_done, base + CCI_IRQ_MASK_1_ADDR);
+	}
+
+
+	cam_io_w_mb(irq_status1, base + CCI_IRQ_CLEAR_1_ADDR);
+	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+	return IRQ_HANDLED;
+}
+
+static int cam_cci_irq_routine(struct v4l2_subdev *sd, u32 status,
+	bool *handled)
+{
+	struct cci_device *cci_dev = v4l2_get_subdevdata(sd);
+	irqreturn_t ret;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+
+	ret = cam_cci_irq(soc_info->irq_line->start, cci_dev);
+	*handled = TRUE;
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops cci_subdev_core_ops = {
+	.ioctl = cam_cci_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_cci_subdev_compat_ioctl,
+#endif
+	.interrupt_service_routine = cam_cci_irq_routine,
+};
+
+static const struct v4l2_subdev_ops cci_subdev_ops = {
+	.core = &cci_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cci_subdev_intern_ops;
+
+static struct v4l2_file_operations cci_v4l2_subdev_fops;
+
+static long cam_cci_subdev_do_ioctl(
+	struct file *file, unsigned int cmd, void *arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
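+	/*
+	 * Note: the payload copied in by video_usercopy() is not forwarded;
+	 * CCI requests are expected to come from in-kernel sensor drivers
+	 * through the subdev ioctl interface instead.
+	 */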
+	return cam_cci_subdev_ioctl(sd, cmd, NULL);
+}
+
+static long cam_cci_subdev_fops_ioctl(struct file *file, unsigned int cmd,
+	unsigned long arg)
+{
+	return video_usercopy(file, cmd, arg, cam_cci_subdev_do_ioctl);
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_cci_subdev_fops_compat_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	struct video_device *vdev = video_devdata(file);
+	struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev);
+
+	return v4l2_subdev_call(sd, core, ioctl, cmd, NULL);
+}
+#endif
+
+static int cam_cci_platform_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_register_params cpas_parms;
+	struct cci_device *new_cci_dev;
+	struct cam_hw_soc_info *soc_info = NULL;
+	int rc = 0;
+
+	new_cci_dev = kzalloc(sizeof(struct cci_device),
+		GFP_KERNEL);
+	if (!new_cci_dev)
+		return -ENOMEM;
+
+	soc_info = &new_cci_dev->soc_info;
+
+	new_cci_dev->v4l2_dev_str.pdev = pdev;
+
+	soc_info->pdev = pdev;
+	soc_info->dev = &pdev->dev;
+	soc_info->dev_name = pdev->name;
+
+	rc = cam_cci_parse_dt_info(pdev, new_cci_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Resource get Failed: %d", rc);
+		goto cci_no_resource;
+	}
+
+	new_cci_dev->v4l2_dev_str.internal_ops =
+		&cci_subdev_intern_ops;
+	new_cci_dev->v4l2_dev_str.ops =
+		&cci_subdev_ops;
+	strlcpy(new_cci_dev->device_name, CAMX_CCI_DEV_NAME,
+		sizeof(new_cci_dev->device_name));
+	new_cci_dev->v4l2_dev_str.name =
+		new_cci_dev->device_name;
+	new_cci_dev->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	new_cci_dev->v4l2_dev_str.ent_function =
+		CAM_CCI_DEVICE_TYPE;
+	new_cci_dev->v4l2_dev_str.token =
+		new_cci_dev;
+
+	rc = cam_register_subdev(&(new_cci_dev->v4l2_dev_str));
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Fail with cam_register_subdev");
+		goto cci_no_resource;
+	}
+
+	platform_set_drvdata(pdev, &(new_cci_dev->v4l2_dev_str.sd));
+	v4l2_set_subdevdata(&new_cci_dev->v4l2_dev_str.sd, new_cci_dev);
+	if (soc_info->index >= MAX_CCI) {
+		CAM_ERR(CAM_CCI, "Invalid index: %d, max supported: %d",
+			soc_info->index, MAX_CCI - 1);
+		rc = -EINVAL;
+		goto cci_no_resource;
+	}
+
+	g_cci_subdev[soc_info->index] = &new_cci_dev->v4l2_dev_str.sd;
+	CAM_ERR(CAM_CCI, "Device Type :%d", soc_info->index);
+
+	cam_register_subdev_fops(&cci_v4l2_subdev_fops);
+	cci_v4l2_subdev_fops.unlocked_ioctl = cam_cci_subdev_fops_ioctl;
+#ifdef CONFIG_COMPAT
+	cci_v4l2_subdev_fops.compat_ioctl32 =
+		cam_cci_subdev_fops_compat_ioctl;
+#endif
+
+	cpas_parms.cam_cpas_client_cb = NULL;
+	cpas_parms.cell_index = soc_info->index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = new_cci_dev;
+	strlcpy(cpas_parms.identifier, "cci", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CCI, "CPAS registration failed");
+		goto cci_no_resource;
+	}
+	CAM_DBG(CAM_CCI, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	new_cci_dev->cpas_handle = cpas_parms.client_handle;
+
+	return rc;
+cci_no_resource:
+	kfree(new_cci_dev);
+	return rc;
+}
+
+static int cam_cci_device_remove(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev = platform_get_drvdata(pdev);
+	struct cci_device *cci_dev =
+		v4l2_get_subdevdata(subdev);
+
+	cam_cpas_unregister_client(cci_dev->cpas_handle);
+	cam_cci_soc_remove(pdev, cci_dev);
+	devm_kfree(&pdev->dev, cci_dev);
+	return 0;
+}
+
+static const struct of_device_id cam_cci_dt_match[] = {
+	{.compatible = "qcom,cci"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_cci_dt_match);
+
+static struct platform_driver cci_driver = {
+	.probe = cam_cci_platform_probe,
+	.remove = cam_cci_device_remove,
+	.driver = {
+		.name = CAMX_CCI_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_cci_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int cam_cci_assign_fops(void)
+{
+	struct v4l2_subdev *sd;
+	int i;
+
+	for (i = 0; i < MAX_CCI; i++) {
+		sd = g_cci_subdev[i];
+		if (!sd)
+			return 0;
+		if (!(sd->devnode)) {
+			CAM_ERR(CAM_CCI,
+			"Invalid dev node:%pK offset: %d",
+			sd->devnode, i);
+			return -EINVAL;
+		}
+		sd->devnode->fops = &cci_v4l2_subdev_fops;
+	}
+
+	return 0;
+}
+
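+/*
+ * The fops below can only be attached once a subdev's devnode exists,
+ * i.e. after every camera module has registered its subdevs, so the
+ * assignment is deferred to a late_initcall rather than done at probe.
+ */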
+static int __init cam_cci_late_init(void)
+{
+	return cam_cci_assign_fops();
+}
+
+static int __init cam_cci_init_module(void)
+{
+	return platform_driver_register(&cci_driver);
+}
+
+static void __exit cam_cci_exit_module(void)
+{
+	platform_driver_unregister(&cci_driver);
+}
+
+module_init(cam_cci_init_module);
+late_initcall(cam_cci_late_init);
+module_exit(cam_cci_exit_module);
+MODULE_DESCRIPTION("MSM CCI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
new file mode 100644
index 0000000..5bdb3fb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_dev.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CCI_DEV_H_
+#define _CAM_CCI_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/cam_sensor.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_io_util.h>
+#include <cam_sensor_util.h>
+#include <cam_subdev.h>
+#include <cam_cpas_api.h>
+#include "cam_cci_hwreg.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define V4L2_IDENT_CCI 50005
+#define CCI_I2C_QUEUE_0_SIZE 128
+#define CCI_I2C_QUEUE_1_SIZE 32
+#define CYCLES_PER_MICRO_SEC_DEFAULT 4915
+#define CCI_MAX_DELAY 1000000
+
+#define CCI_TIMEOUT msecs_to_jiffies(1500)
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+#define CCI_PINCTRL_STATE_DEFAULT "cci_default"
+#define CCI_PINCTRL_STATE_SLEEP "cci_suspend"
+
+#define CCI_NUM_CLK_MAX 16
+#define CCI_NUM_CLK_CASES 5
+#define CCI_CLK_SRC_NAME "cci_src_clk"
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_10 10
+#define MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11 11
+#define BURST_MIN_FREE_SIZE 8
+#define MAX_LRME_V4l2_EVENTS 30
+
+/* Max bytes that can be read per CCI read transaction */
+#define CCI_READ_MAX 256
+#define CCI_I2C_READ_MAX_RETRIES 3
+#define CCI_I2C_MAX_READ 8192
+#define CCI_I2C_MAX_WRITE 8192
+#define CCI_I2C_MAX_BYTE_COUNT 65535
+
+#define CAMX_CCI_DEV_NAME "cam-cci-driver"
+
+#define MAX_CCI 2
+
+#define PRIORITY_QUEUE (QUEUE_0)
+#define SYNC_QUEUE (QUEUE_1)
+
+enum cci_i2c_sync {
+	MSM_SYNC_DISABLE,
+	MSM_SYNC_ENABLE,
+};
+
+enum cam_cci_cmd_type {
+	MSM_CCI_INIT,
+	MSM_CCI_RELEASE,
+	MSM_CCI_SET_SID,
+	MSM_CCI_SET_FREQ,
+	MSM_CCI_SET_SYNC_CID,
+	MSM_CCI_I2C_READ,
+	MSM_CCI_I2C_WRITE,
+	MSM_CCI_I2C_WRITE_SEQ,
+	MSM_CCI_I2C_WRITE_BURST,
+	MSM_CCI_I2C_WRITE_ASYNC,
+	MSM_CCI_GPIO_WRITE,
+	MSM_CCI_I2C_WRITE_SYNC,
+	MSM_CCI_I2C_WRITE_SYNC_BLOCK,
+};
+
+enum cci_i2c_queue_t {
+	QUEUE_0,
+	QUEUE_1,
+	QUEUE_INVALID,
+};
+
+struct cam_cci_wait_sync_cfg {
+	uint16_t cid;
+	int16_t csid;
+	uint16_t line;
+	uint16_t delay;
+};
+
+struct cam_cci_gpio_cfg {
+	uint16_t gpio_queue;
+	uint16_t i2c_queue;
+};
+
+struct cam_cci_read_cfg {
+	uint32_t addr;
+	uint16_t addr_type;
+	uint8_t *data;
+	uint16_t num_byte;
+	uint16_t data_type;
+};
+
+struct cam_cci_i2c_queue_info {
+	uint32_t max_queue_size;
+	uint32_t report_id;
+	uint32_t irq_en;
+	uint32_t capture_rep_data;
+};
+
+struct cam_cci_master_info {
+	uint32_t status;
+	atomic_t q_free[NUM_QUEUES];
+	uint8_t q_lock[NUM_QUEUES];
+	uint8_t reset_pending;
+	struct mutex mutex;
+	struct completion reset_complete;
+	struct completion th_complete;
+	struct mutex mutex_q[NUM_QUEUES];
+	struct completion report_q[NUM_QUEUES];
+	atomic_t done_pending[NUM_QUEUES];
+	spinlock_t lock_q[NUM_QUEUES];
+};
+
+struct cam_cci_clk_params_t {
+	uint16_t hw_thigh;
+	uint16_t hw_tlow;
+	uint16_t hw_tsu_sto;
+	uint16_t hw_tsu_sta;
+	uint16_t hw_thd_dat;
+	uint16_t hw_thd_sta;
+	uint16_t hw_tbuf;
+	uint8_t hw_scl_stretch_en;
+	uint8_t hw_trdhld;
+	uint8_t hw_tsp;
+	uint32_t cci_clk_src;
+};
+
+enum cam_cci_state_t {
+	CCI_STATE_ENABLED,
+	CCI_STATE_DISABLED,
+};
+
+/**
+ * struct cci_device
+ * @subdev: V4L2 sub device
+ * @soc_info: CCI SOC information (register map, clocks, IRQ)
+ * @hw_version: Hardware version
+ * @ref_count: Reference count
+ * @cci_state: CCI state machine
+ * @cci_i2c_queue_info: CCI queue information, per master and queue
+ * @cci_master_info: Per-master status, locks and completions
+ * @i2c_freq_mode: I2C frequency of operations, per master
+ * @cci_clk_params: CCI hw clk params, per I2C mode
+ * @cci_pinctrl: Pinctrl structure
+ * @cci_pinctrl_status: CCI pinctrl status
+ * @support_seq_write:
+ *     Set this flag when sequential write is enabled
+ * @write_wq: Write work queue, per master
+ * @cci_wait_sync_cfg: CCI sync config
+ * @valid_sync: Is it a valid sync with CSID
+ * @v4l2_dev_str: V4L2 device structure
+ * @cycles_per_us: Cycles per micro sec
+ * @clk_level_index: Index of the selected clock level
+ * @payload_size: CCI packet payload size
+ * @device_name: CCI device name
+ * @cpas_handle: CPAS client handle
+ * @irq_status1: Store irq_status1 to be cleared after
+ *               draining FIFO buffer for burst read
+ * @lock_status: Protects changes to irq_status1 and irqs_disabled
+ * @is_burst_read: Flag to determine if we are performing
+ *                 a burst read operation or not
+ * @irqs_disabled: Mask for IRQs that are disabled
+ */
+struct cci_device {
+	struct v4l2_subdev subdev;
+	struct cam_hw_soc_info soc_info;
+	uint32_t hw_version;
+	uint8_t ref_count;
+	enum cam_cci_state_t cci_state;
+	struct cam_cci_i2c_queue_info
+		cci_i2c_queue_info[NUM_MASTERS][NUM_QUEUES];
+	struct cam_cci_master_info cci_master_info[NUM_MASTERS];
+	enum i2c_freq_mode i2c_freq_mode[NUM_MASTERS];
+	struct cam_cci_clk_params_t cci_clk_params[I2C_MAX_MODES];
+	struct msm_pinctrl_info cci_pinctrl;
+	uint8_t cci_pinctrl_status;
+	uint8_t support_seq_write;
+	struct workqueue_struct *write_wq[MASTER_MAX];
+	struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
+	uint8_t valid_sync;
+	struct cam_subdev v4l2_dev_str;
+	uint32_t cycles_per_us;
+	int32_t clk_level_index;
+	uint8_t payload_size;
+	char device_name[20];
+	uint32_t cpas_handle;
+	uint32_t irq_status1;
+	spinlock_t lock_status;
+	bool is_burst_read;
+	uint32_t irqs_disabled;
+};
+
+enum cam_cci_i2c_cmd_type {
+	CCI_I2C_SET_PARAM_CMD = 1,
+	CCI_I2C_WAIT_CMD,
+	CCI_I2C_WAIT_SYNC_CMD,
+	CCI_I2C_WAIT_GPIO_EVENT_CMD,
+	CCI_I2C_TRIG_I2C_EVENT_CMD,
+	CCI_I2C_LOCK_CMD,
+	CCI_I2C_UNLOCK_CMD,
+	CCI_I2C_REPORT_CMD,
+	CCI_I2C_WRITE_CMD,
+	CCI_I2C_READ_CMD,
+	CCI_I2C_WRITE_DISABLE_P_CMD,
+	CCI_I2C_READ_DISABLE_P_CMD,
+	CCI_I2C_WRITE_CMD2,
+	CCI_I2C_WRITE_CMD3,
+	CCI_I2C_REPEAT_CMD,
+	CCI_I2C_INVALID_CMD,
+};
+
+enum cam_cci_gpio_cmd_type {
+	CCI_GPIO_SET_PARAM_CMD = 1,
+	CCI_GPIO_WAIT_CMD,
+	CCI_GPIO_WAIT_SYNC_CMD,
+	CCI_GPIO_WAIT_GPIO_IN_EVENT_CMD,
+	CCI_GPIO_WAIT_I2C_Q_TRIG_EVENT_CMD,
+	CCI_GPIO_OUT_CMD,
+	CCI_GPIO_TRIG_EVENT_CMD,
+	CCI_GPIO_REPORT_CMD,
+	CCI_GPIO_REPEAT_CMD,
+	CCI_GPIO_CONTINUE_CMD,
+	CCI_GPIO_INVALID_CMD,
+};
+
+struct cam_sensor_cci_client {
+	struct v4l2_subdev *cci_subdev;
+	uint32_t freq;
+	enum i2c_freq_mode i2c_freq_mode;
+	enum cci_i2c_master_t cci_i2c_master;
+	uint16_t sid;
+	uint16_t cid;
+	uint32_t timeout;
+	uint16_t retries;
+	uint16_t id_map;
+	uint16_t cci_device;
+};
+
+struct cam_cci_ctrl {
+	int32_t status;
+	struct cam_sensor_cci_client *cci_info;
+	enum cam_cci_cmd_type cmd;
+	union {
+		struct cam_sensor_i2c_reg_setting cci_i2c_write_cfg;
+		struct cam_cci_read_cfg cci_i2c_read_cfg;
+		struct cam_cci_wait_sync_cfg cci_wait_sync_cfg;
+		struct cam_cci_gpio_cfg gpio_cfg;
+	} cfg;
+};
+
+struct cci_write_async {
+	struct cci_device *cci_dev;
+	struct cam_cci_ctrl c_ctrl;
+	enum cci_i2c_queue_t queue;
+	struct work_struct work;
+	enum cci_i2c_sync sync_en;
+};
+
+irqreturn_t cam_cci_irq(int irq_num, void *data);
+
+#ifdef CONFIG_SPECTRA_CAMERA
+extern struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index);
+#else
+static inline struct v4l2_subdev *cam_cci_get_subdev(int cci_dev_index)
+{
+	return NULL;
+}
+#endif
+
+#define VIDIOC_MSM_CCI_CFG \
+	_IOWR('V', BASE_VIDIOC_PRIVATE + 23, struct cam_cci_ctrl *)
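+/*
+ * Illustrative usage from an in-kernel sensor driver (sketch only, not
+ * part of this driver): fill a cam_cci_ctrl request and push it through
+ * the CCI subdev ioctl. Shown for a single I2C read; cci_client,
+ * reg_addr and addr_type stand in for the caller's own state.
+ *
+ *	struct cam_cci_ctrl ctrl = {0};
+ *	uint8_t buf[2];
+ *
+ *	ctrl.cmd = MSM_CCI_I2C_READ;
+ *	ctrl.cci_info = cci_client;
+ *	ctrl.cfg.cci_i2c_read_cfg.addr = reg_addr;
+ *	ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+ *	ctrl.cfg.cci_i2c_read_cfg.data = buf;
+ *	ctrl.cfg.cci_i2c_read_cfg.num_byte = sizeof(buf);
+ *	rc = v4l2_subdev_call(cci_client->cci_subdev, core, ioctl,
+ *		VIDIOC_MSM_CCI_CFG, &ctrl);
+ *
+ * A negative rc (or ctrl.status) indicates the transaction failed.
+ */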
+
+#endif /* _CAM_CCI_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
new file mode 100644
index 0000000..86134c3
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_hwreg.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2012-2015, 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CCI_HWREG_
+#define _CAM_CCI_HWREG_
+
+#define CCI_HW_VERSION_ADDR                                         0x00000000
+#define CCI_RESET_CMD_ADDR                                          0x00000004
+#define CCI_RESET_CMD_RMSK                                          0x0f73f3f7
+#define CCI_M0_RESET_RMSK                                                0x3F1
+#define CCI_M1_RESET_RMSK                                              0x3F001
+#define CCI_QUEUE_START_ADDR                                        0x00000008
+#define CCI_SET_CID_SYNC_TIMER_ADDR                                 0x00000010
+#define CCI_SET_CID_SYNC_TIMER_OFFSET                               0x00000004
+#define CCI_I2C_M0_SCL_CTL_ADDR                                     0x00000100
+#define CCI_I2C_M0_SDA_CTL_0_ADDR                                   0x00000104
+#define CCI_I2C_M0_SDA_CTL_1_ADDR                                   0x00000108
+#define CCI_I2C_M0_SDA_CTL_2_ADDR                                   0x0000010c
+#define CCI_I2C_M0_READ_DATA_ADDR                                   0x00000118
+#define CCI_I2C_M0_MISC_CTL_ADDR                                    0x00000110
+#define CCI_I2C_M0_READ_BUF_LEVEL_ADDR                              0x0000011C
+#define CCI_HALT_REQ_ADDR                                           0x00000034
+#define CCI_M0_HALT_REQ_RMSK                                               0x1
+#define CCI_M1_HALT_REQ_RMSK                                               0x2
+#define CCI_I2C_M1_SCL_CTL_ADDR                                     0x00000200
+#define CCI_I2C_M1_SDA_CTL_0_ADDR                                   0x00000204
+#define CCI_I2C_M1_SDA_CTL_1_ADDR                                   0x00000208
+#define CCI_I2C_M1_SDA_CTL_2_ADDR                                   0x0000020c
+#define CCI_I2C_M1_MISC_CTL_ADDR                                    0x00000210
+#define CCI_I2C_M0_Q0_CUR_WORD_CNT_ADDR                             0x00000304
+#define CCI_I2C_M0_Q0_CUR_CMD_ADDR                                  0x00000308
+#define CCI_I2C_M0_Q0_REPORT_STATUS_ADDR                            0x0000030c
+#define CCI_I2C_M0_Q0_EXEC_WORD_CNT_ADDR                            0x00000300
+#define CCI_I2C_M0_Q0_LOAD_DATA_ADDR                                0x00000310
+#define CCI_IRQ_MASK_0_ADDR                                         0x00000c04
+#define CCI_IRQ_MASK_0_RMSK                                         0x7fff7ff7
+#define CCI_IRQ_MASK_1_ADDR                                         0x00000c10
+#define CCI_IRQ_MASK_1_RMSK                                         0x00110000
+#define CCI_IRQ_CLEAR_0_ADDR                                        0x00000c08
+#define CCI_IRQ_CLEAR_1_ADDR                                        0x00000c14
+#define CCI_IRQ_STATUS_0_ADDR                                       0x00000c0c
+#define CCI_IRQ_STATUS_1_ADDR                                       0x00000c18
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0Q1_HALT_ACK_BMSK                   0x4000000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0Q1_HALT_ACK_BMSK                   0x2000000
+#define CCI_IRQ_STATUS_0_RST_DONE_ACK_BMSK                           0x1000000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q1_REPORT_BMSK                        0x100000
+#define CCI_IRQ_STATUS_0_I2C_M1_Q0_REPORT_BMSK                         0x10000
+#define CCI_IRQ_STATUS_0_I2C_M1_RD_DONE_BMSK                            0x1000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_THRESHOLD                          0x100000
+#define CCI_IRQ_STATUS_1_I2C_M1_RD_PAUSE                              0x200000
+#define CCI_IRQ_STATUS_0_I2C_M0_Q1_REPORT_BMSK                           0x100
+#define CCI_IRQ_STATUS_0_I2C_M0_Q0_REPORT_BMSK                            0x10
+#define CCI_IRQ_STATUS_0_I2C_M0_ERROR_BMSK                          0x18000EE6
+#define CCI_IRQ_STATUS_0_I2C_M1_ERROR_BMSK                          0x60EE6000
+#define CCI_IRQ_STATUS_0_I2C_M0_RD_DONE_BMSK                               0x1
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_THRESHOLD                           0x10000
+#define CCI_IRQ_STATUS_1_I2C_M0_RD_PAUSE                               0x20000
+#define CCI_I2C_M0_RD_THRESHOLD_ADDR                                0x00000120
+#define CCI_I2C_M1_RD_THRESHOLD_ADDR                                0x00000220
+#define CCI_I2C_RD_THRESHOLD_VALUE                                        0x30
+#define CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR                               0x00000c00
+
+#define DEBUG_TOP_REG_START                                                0x0
+#define DEBUG_TOP_REG_COUNT                                                 14
+#define DEBUG_MASTER_REG_START                                           0x100
+#define DEBUG_MASTER_REG_COUNT                                               9
+#define DEBUG_MASTER_QUEUE_REG_START                                     0x300
+#define DEBUG_MASTER_QUEUE_REG_COUNT                                         7
+#define DEBUG_INTR_REG_START                                             0xC00
+#define DEBUG_INTR_REG_COUNT                                                 7
+#endif /* _CAM_CCI_HWREG_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
new file mode 100644
index 0000000..5a6f478
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_cci_dev.h"
+#include "cam_cci_core.h"
+
+int cam_cci_init(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl)
+{
+	uint8_t i = 0, j = 0;
+	int32_t rc = 0;
+	struct cci_device *cci_dev;
+	enum cci_i2c_master_t master = MASTER_0;
+	struct cam_ahb_vote ahb_vote;
+	struct cam_axi_vote axi_vote;
+	struct cam_hw_soc_info *soc_info = NULL;
+	void __iomem *base = NULL;
+
+	cci_dev = v4l2_get_subdevdata(sd);
+	if (!cci_dev || !c_ctrl) {
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			cci_dev, c_ctrl);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	soc_info = &cci_dev->soc_info;
+	base = soc_info->reg_map[0].mem_base;
+
+	if (!soc_info || !base) {
+		CAM_ERR(CAM_CCI, "failed: invalid params %pK %pK",
+			soc_info, base);
+		rc = -EINVAL;
+		return rc;
+	}
+
+	CAM_DBG(CAM_CCI, "Base address %pK", base);
+
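+	/*
+	 * Non-first init (ref_count already held): only the requesting
+	 * master is reset here - flush its write workqueue, re-arm the
+	 * completions and trigger a per-master reset, then wait for the
+	 * reset-done IRQ.
+	 */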
+	if (cci_dev->ref_count++) {
+		CAM_DBG(CAM_CCI, "ref_count %d", cci_dev->ref_count);
+		master = c_ctrl->cci_info->cci_i2c_master;
+		CAM_DBG(CAM_CCI, "master %d", master);
+		if (master < MASTER_MAX && master >= 0) {
+			mutex_lock(&cci_dev->cci_master_info[master].mutex);
+			flush_workqueue(cci_dev->write_wq[master]);
+			/* Re-initialize the completion */
+			reinit_completion(
+			&cci_dev->cci_master_info[master].reset_complete);
+			for (i = 0; i < NUM_QUEUES; i++)
+				reinit_completion(
+				&cci_dev->cci_master_info[master].report_q[i]);
+			/* Set reset pending flag to TRUE */
+			cci_dev->cci_master_info[master].reset_pending = TRUE;
+			/* Set proper mask to RESET CMD address */
+			if (master == MASTER_0)
+				cam_io_w_mb(CCI_M0_RESET_RMSK,
+					base + CCI_RESET_CMD_ADDR);
+			else
+				cam_io_w_mb(CCI_M1_RESET_RMSK,
+					base + CCI_RESET_CMD_ADDR);
+			/* wait for reset done irq */
+			rc = wait_for_completion_timeout(
+			&cci_dev->cci_master_info[master].reset_complete,
+				CCI_TIMEOUT);
+			if (rc <= 0)
+				CAM_ERR(CAM_CCI, "wait failed %d", rc);
+			mutex_unlock(&cci_dev->cci_master_info[master].mutex);
+		}
+		return 0;
+	}
+
+	ahb_vote.type = CAM_VOTE_ABSOLUTE;
+	ahb_vote.vote.level = CAM_SVS_VOTE;
+	axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+	axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+	rc = cam_cpas_start(cci_dev->cpas_handle,
+		&ahb_vote, &axi_vote);
+	if (rc != 0)
+		CAM_ERR(CAM_CCI, "CPAS start failed");
+
+	cam_cci_get_clk_rates(cci_dev, c_ctrl);
+
+	/* Re-initialize the completion */
+	reinit_completion(&cci_dev->cci_master_info[master].reset_complete);
+	for (i = 0; i < NUM_QUEUES; i++)
+		reinit_completion(
+			&cci_dev->cci_master_info[master].report_q[i]);
+
+	/* Enable Regulators and IRQ*/
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_LOWSVS_VOTE, true);
+	if (rc < 0) {
+		CAM_DBG(CAM_CCI, "request platform resources failed");
+		goto platform_enable_failed;
+	}
+
+	cci_dev->hw_version = cam_io_r_mb(base +
+		CCI_HW_VERSION_ADDR);
+	CAM_DBG(CAM_CCI, "hw_version = 0x%x", cci_dev->hw_version);
+
+	cci_dev->payload_size =
+		MSM_CCI_WRITE_DATA_PAYLOAD_SIZE_11;
+	cci_dev->support_seq_write = 1;
+
+	for (i = 0; i < NUM_MASTERS; i++) {
+		for (j = 0; j < NUM_QUEUES; j++) {
+			if (j == QUEUE_0)
+				cci_dev->cci_i2c_queue_info[i][j].max_queue_size
+					= CCI_I2C_QUEUE_0_SIZE;
+			else
+				cci_dev->cci_i2c_queue_info[i][j].max_queue_size
+					= CCI_I2C_QUEUE_1_SIZE;
+
+			CAM_DBG(CAM_CCI, "CCI Master[%d] :: Q0 : %d Q1 : %d", i,
+			cci_dev->cci_i2c_queue_info[i][j].max_queue_size,
+			cci_dev->cci_i2c_queue_info[i][j].max_queue_size);
+		}
+	}
+
+	cci_dev->cci_master_info[MASTER_0].reset_pending = TRUE;
+	cam_io_w_mb(CCI_RESET_CMD_RMSK, base +
+			CCI_RESET_CMD_ADDR);
+	cam_io_w_mb(0x1, base + CCI_RESET_CMD_ADDR);
+	rc = wait_for_completion_timeout(
+		&cci_dev->cci_master_info[MASTER_0].reset_complete,
+		CCI_TIMEOUT);
+	if (rc <= 0) {
+		CAM_ERR(CAM_CCI, "wait_for_completion_timeout");
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+		goto reset_complete_failed;
+	}
+	for (i = 0; i < MASTER_MAX; i++)
+		cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+		base + CCI_IRQ_MASK_0_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_0_RMSK,
+		base + CCI_IRQ_CLEAR_0_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+		base + CCI_IRQ_MASK_1_ADDR);
+	cam_io_w_mb(CCI_IRQ_MASK_1_RMSK,
+		base + CCI_IRQ_CLEAR_1_ADDR);
+	cam_io_w_mb(0x1, base + CCI_IRQ_GLOBAL_CLEAR_CMD_ADDR);
+
+	for (i = 0; i < MASTER_MAX; i++) {
+		if (!cci_dev->write_wq[i]) {
+			CAM_ERR(CAM_CCI, "Failed to flush write wq");
+			rc = -ENOMEM;
+			goto reset_complete_failed;
+		} else {
+			flush_workqueue(cci_dev->write_wq[i]);
+		}
+	}
+
+	/* Set RD FIFO threshold for M0 & M1 */
+	cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+		base + CCI_I2C_M0_RD_THRESHOLD_ADDR);
+	cam_io_w_mb(CCI_I2C_RD_THRESHOLD_VALUE,
+		base + CCI_I2C_M1_RD_THRESHOLD_ADDR);
+
+	cci_dev->cci_state = CCI_STATE_ENABLED;
+
+	return 0;
+
+reset_complete_failed:
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+
+platform_enable_failed:
+	cci_dev->ref_count--;
+	cam_cpas_stop(cci_dev->cpas_handle);
+
+	return rc;
+}
+
+void cam_cci_soc_remove(struct platform_device *pdev,
+	struct cci_device *cci_dev)
+{
+	struct cam_hw_soc_info *soc_info = &cci_dev->soc_info;
+
+	cam_soc_util_release_platform_resource(soc_info);
+}
+
+static void cam_cci_init_cci_params(struct cci_device *new_cci_dev)
+{
+	uint8_t i = 0, j = 0;
+
+	for (i = 0; i < NUM_MASTERS; i++) {
+		new_cci_dev->cci_master_info[i].status = 0;
+		mutex_init(&new_cci_dev->cci_master_info[i].mutex);
+		init_completion(
+			&new_cci_dev->cci_master_info[i].reset_complete);
+		init_completion(
+			&new_cci_dev->cci_master_info[i].th_complete);
+
+		for (j = 0; j < NUM_QUEUES; j++) {
+			mutex_init(&new_cci_dev->cci_master_info[i].mutex_q[j]);
+			init_completion(
+				&new_cci_dev->cci_master_info[i].report_q[j]);
+			spin_lock_init(
+				&new_cci_dev->cci_master_info[i].lock_q[j]);
+		}
+	}
+	spin_lock_init(&new_cci_dev->lock_status);
+}
+
+static void cam_cci_init_default_clk_params(struct cci_device *cci_dev,
+	uint8_t index)
+{
+	/* Default clock params are for 100 kHz */
+	cci_dev->cci_clk_params[index].hw_thigh = 201;
+	cci_dev->cci_clk_params[index].hw_tlow = 174;
+	cci_dev->cci_clk_params[index].hw_tsu_sto = 204;
+	cci_dev->cci_clk_params[index].hw_tsu_sta = 231;
+	cci_dev->cci_clk_params[index].hw_thd_dat = 22;
+	cci_dev->cci_clk_params[index].hw_thd_sta = 162;
+	cci_dev->cci_clk_params[index].hw_tbuf = 227;
+	cci_dev->cci_clk_params[index].hw_scl_stretch_en = 0;
+	cci_dev->cci_clk_params[index].hw_trdhld = 6;
+	cci_dev->cci_clk_params[index].hw_tsp = 3;
+	cci_dev->cci_clk_params[index].cci_clk_src = 37500000;
+}
+
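+/*
+ * Pull per-I2C-mode timing from DT. Each mode node (qcom,i2c_standard_mode,
+ * qcom,i2c_fast_mode, qcom,i2c_fast_plus_mode or qcom,i2c_custom_mode) may
+ * carry hw-thigh, hw-tlow, hw-tsu-sto, hw-tsu-sta, hw-thd-dat, hw-thd-sta,
+ * hw-tbuf, hw-scl-stretch-en, hw-trdhld, hw-tsp and cci-clk-src; if any of
+ * these is missing, the whole mode falls back to the 100 kHz defaults above.
+ */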
+static void cam_cci_init_clk_params(struct cci_device *cci_dev)
+{
+	int32_t rc = 0;
+	uint32_t val = 0;
+	uint8_t count = 0;
+	struct device_node *of_node = cci_dev->v4l2_dev_str.pdev->dev.of_node;
+	struct device_node *src_node = NULL;
+
+	for (count = 0; count < I2C_MAX_MODES; count++) {
+
+		if (count == I2C_STANDARD_MODE)
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_standard_mode");
+		else if (count == I2C_FAST_MODE)
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_fast_mode");
+		else if (count == I2C_FAST_PLUS_MODE)
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_fast_plus_mode");
+		else
+			src_node = of_find_node_by_name(of_node,
+				"qcom,i2c_custom_mode");
+
+		rc = of_property_read_u32(src_node, "hw-thigh", &val);
+		CAM_DBG(CAM_CCI, "hw-thigh %d, rc %d", val, rc);
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_thigh = val;
+			rc = of_property_read_u32(src_node, "hw-tlow",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-tlow %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tlow = val;
+			rc = of_property_read_u32(src_node, "hw-tsu-sto",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-tsu-sto %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tsu_sto = val;
+			rc = of_property_read_u32(src_node, "hw-tsu-sta",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-tsu-sta %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tsu_sta = val;
+			rc = of_property_read_u32(src_node, "hw-thd-dat",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-thd-dat %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_thd_dat = val;
+			rc = of_property_read_u32(src_node, "hw-thd-sta",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-thd-sta %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_thd_sta = val;
+			rc = of_property_read_u32(src_node, "hw-tbuf",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-tbuf %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tbuf = val;
+			rc = of_property_read_u32(src_node,
+				"hw-scl-stretch-en", &val);
+			CAM_DBG(CAM_CCI, "hw-scl-stretch-en %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_scl_stretch_en = val;
+			rc = of_property_read_u32(src_node, "hw-trdhld",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-trdhld %d, rc %d",
+				val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_trdhld = val;
+			rc = of_property_read_u32(src_node, "hw-tsp",
+				&val);
+			CAM_DBG(CAM_CCI, "hw-tsp %d, rc %d", val, rc);
+		}
+		if (!rc) {
+			cci_dev->cci_clk_params[count].hw_tsp = val;
+			val = 0;
+			rc = of_property_read_u32(src_node, "cci-clk-src",
+				&val);
+			CAM_DBG(CAM_CCI, "cci-clk-src %d, rc %d", val, rc);
+			cci_dev->cci_clk_params[count].cci_clk_src = val;
+		} else {
+			cam_cci_init_default_clk_params(cci_dev, count);
+		}
+
+		of_node_put(src_node);
+	}
+}
+
+int cam_cci_parse_dt_info(struct platform_device *pdev,
+	struct cci_device *new_cci_dev)
+{
+	int rc = 0, i = 0;
+	struct cam_hw_soc_info *soc_info =
+		&new_cci_dev->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "Parsing DT data failed:%d", rc);
+		return -EINVAL;
+	}
+
+	new_cci_dev->ref_count = 0;
+
+	rc = cam_soc_util_request_platform_resource(soc_info,
+		cam_cci_irq, new_cci_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CCI, "requesting platform resources failed:%d", rc);
+		return -EINVAL;
+	}
+	new_cci_dev->v4l2_dev_str.pdev = pdev;
+	cam_cci_init_cci_params(new_cci_dev);
+	cam_cci_init_clk_params(new_cci_dev);
+
+	rc = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (rc)
+		CAM_ERR(CAM_CCI, "failed to add child nodes, rc=%d", rc);
+
+	for (i = 0; i < MASTER_MAX; i++) {
+		new_cci_dev->write_wq[i] = create_singlethread_workqueue(
+			"cam_cci_wq");
+		if (!new_cci_dev->write_wq[i])
+			CAM_ERR(CAM_CCI, "Failed to create write wq");
+	}
+	CAM_DBG(CAM_CCI, "Exit");
+	return 0;
+}
+
+int cam_cci_soc_release(struct cci_device *cci_dev)
+{
+	uint8_t i = 0;
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&cci_dev->soc_info;
+
+	if (!cci_dev->ref_count || cci_dev->cci_state != CCI_STATE_ENABLED) {
+		CAM_ERR(CAM_CCI, "invalid ref count %d / cci state %d",
+			cci_dev->ref_count, cci_dev->cci_state);
+		return -EINVAL;
+	}
+	if (--cci_dev->ref_count) {
+		CAM_DBG(CAM_CCI, "ref_count Exit %d", cci_dev->ref_count);
+		return 0;
+	}
+	for (i = 0; i < MASTER_MAX; i++)
+		if (cci_dev->write_wq[i])
+			flush_workqueue(cci_dev->write_wq[i]);
+
+	for (i = 0; i < MASTER_MAX; i++)
+		cci_dev->i2c_freq_mode[i] = I2C_MAX_MODES;
+
+	rc = cam_soc_util_disable_platform_resource(soc_info, true, true);
+	if (rc) {
+		CAM_ERR(CAM_CCI, "platform resources disable failed, rc=%d",
+			rc);
+		return rc;
+	}
+
+	cci_dev->cci_state = CCI_STATE_DISABLED;
+	cci_dev->cycles_per_us = 0;
+
+	cam_cpas_stop(cci_dev->cpas_handle);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
new file mode 100644
index 0000000..4a6960d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_cci/cam_cci_soc.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CCI_SOC_H_
+#define _CAM_CCI_SOC_H_
+
+#include "cam_cci_core.h"
+#include "cam_soc_util.h"
+
+/**
+ * cam_cci_init - initialize CCI and acquire SOC resources
+ * @sd: V4L2 sub device
+ * @c_ctrl: CCI control structure
+ *
+ * This API initializes the CCI and acquires SOC resources
+ */
+int cam_cci_init(struct v4l2_subdev *sd,
+	struct cam_cci_ctrl *c_ctrl);
+
+/**
+ * cam_cci_soc_release - release the CCI and its SOC resources
+ * @cci_dev: CCI device structure
+ *
+ * This API releases the CCI and its SOC resources
+ */
+int cam_cci_soc_release(struct cci_device *cci_dev);
+
+/**
+ * cam_cci_parse_dt_info - parse the CCI device tree
+ * @pdev: Platform device
+ * @new_cci_dev: CCI device structure
+ *
+ * This API parses CCI device tree
+ */
+int cam_cci_parse_dt_info(struct platform_device *pdev,
+	struct cci_device *new_cci_dev);
+
+/**
+ * cam_cci_soc_remove - put all CCI SOC resources
+ * @pdev: Platform device
+ * @cci_dev: CCI device structure
+ *
+ * This API puts all SOC resources
+ */
+void cam_cci_soc_remove(struct platform_device *pdev,
+	struct cci_device *cci_dev);
+#endif /* _CAM_CCI_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
new file mode 100644
index 0000000..2228f23
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_csiphy_soc.o cam_csiphy_dev.o cam_csiphy_core.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
new file mode 100644
index 0000000..8225758
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.c
@@ -0,0 +1,868 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include "cam_csiphy_core.h"
+#include "cam_csiphy_dev.h"
+#include "cam_csiphy_soc.h"
+#include "cam_common_util.h"
+
+#include <soc/qcom/scm.h>
+#include <cam_mem_mgr.h>
+
+#define SCM_SVC_CAMERASS 0x18
+#define SECURE_SYSCALL_ID 0x6
+#define SECURE_SYSCALL_ID_2 0x7
+
+#define LANE_MASK_2PH 0x1F
+#define LANE_MASK_3PH 0x7
+
+static int csiphy_dump;
+module_param(csiphy_dump, int, 0644);
+
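+/*
+ * Notify the secure environment, via an SCM call into TZ, which lanes of
+ * this PHY are entering or leaving protection; the lane set is the
+ * per-instance CP register mask built in cam_csiphy_update_secure_info().
+ */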
+static int cam_csiphy_notify_secure_mode(struct csiphy_device *csiphy_dev,
+	bool protect, int32_t offset)
+{
+	struct scm_desc desc = {0};
+
+	if (offset >= CSIPHY_MAX_INSTANCES) {
+		CAM_ERR(CAM_CSIPHY, "Invalid CSIPHY offset");
+		return -EINVAL;
+	}
+
+	desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_VAL);
+	desc.args[0] = protect;
+	desc.args[1] = csiphy_dev->csiphy_cpas_cp_reg_mask[offset];
+
+	if (scm_call2(SCM_SIP_FNID(SCM_SVC_CAMERASS, SECURE_SYSCALL_ID_2),
+		&desc)) {
+		CAM_ERR(CAM_CSIPHY, "scm call to hypervisor failed");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int32_t cam_csiphy_get_instance_offset(
+	struct csiphy_device *csiphy_dev,
+	int32_t dev_handle)
+{
+	int32_t i;
+
+	if (csiphy_dev->acquire_count >
+		CSIPHY_MAX_INSTANCES) {
+		CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < csiphy_dev->acquire_count; i++) {
+		if (dev_handle ==
+			csiphy_dev->bridge_intf.device_hdl[i])
+			break;
+	}
+
+	return i;
+}
+
+void cam_csiphy_query_cap(struct csiphy_device *csiphy_dev,
+	struct cam_csiphy_query_cap *csiphy_cap)
+{
+	struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+	csiphy_cap->slot_info = soc_info->index;
+	csiphy_cap->version = csiphy_dev->hw_version;
+	csiphy_cap->clk_lane = csiphy_dev->clk_lane;
+}
+
+void cam_csiphy_reset(struct csiphy_device *csiphy_dev)
+{
+	int32_t  i;
+	void __iomem *base = NULL;
+	uint32_t size =
+		csiphy_dev->ctrl_reg->csiphy_reg.csiphy_reset_array_size;
+	struct cam_hw_soc_info *soc_info = &csiphy_dev->soc_info;
+
+	base = soc_info->reg_map[0].mem_base;
+
+	for (i = 0; i < size; i++) {
+		cam_io_w_mb(
+			csiphy_dev->ctrl_reg->csiphy_reset_reg[i].reg_data,
+			base +
+			csiphy_dev->ctrl_reg->csiphy_reset_reg[i].reg_addr);
+
+		usleep_range(csiphy_dev->ctrl_reg->csiphy_reset_reg[i].delay
+			* 1000,	csiphy_dev->ctrl_reg->csiphy_reset_reg[i].delay
+			* 1000 + 10);
+	}
+}
+
+int32_t cam_csiphy_update_secure_info(
+	struct csiphy_device *csiphy_dev,
+	struct cam_csiphy_info  *cam_cmd_csiphy_info,
+	struct cam_config_dev_cmd *cfg_dev)
+{
+	uint32_t clock_lane, adj_lane_mask, temp;
+	int32_t offset;
+
+	if (csiphy_dev->acquire_count >=
+		CSIPHY_MAX_INSTANCES) {
+		CAM_ERR(CAM_CSIPHY, "Invalid acquire count");
+		return -EINVAL;
+	}
+
+	offset = cam_csiphy_get_instance_offset(csiphy_dev,
+		cfg_dev->dev_handle);
+	if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+		CAM_ERR(CAM_CSIPHY, "Invalid offset");
+		return -EINVAL;
+	}
+
+	if (cam_cmd_csiphy_info->combo_mode)
+		clock_lane =
+			csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_combo_ck_ln;
+	else
+		clock_lane =
+			csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_clock_lane;
+
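+	/*
+	 * 2PH (DPHY): strip the clock lane out of the lane mask and close
+	 * the gap it leaves so the data lanes form a contiguous bitfield;
+	 * 3PH (CPHY) has no clock lane, so its mask is taken as-is. The
+	 * result is then shifted into this PHY's slice of the combined CP
+	 * register mask handed to the secure environment.
+	 */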
+	adj_lane_mask = cam_cmd_csiphy_info->lane_mask & LANE_MASK_2PH &
+		~clock_lane;
+	temp = adj_lane_mask & (clock_lane - 1);
+	adj_lane_mask =
+		((adj_lane_mask & (~(clock_lane - 1))) >> 1) | temp;
+
+	if (cam_cmd_csiphy_info->csiphy_3phase)
+		adj_lane_mask = cam_cmd_csiphy_info->lane_mask & LANE_MASK_3PH;
+
+	csiphy_dev->csiphy_info.secure_mode[offset] = 1;
+
+	csiphy_dev->csiphy_cpas_cp_reg_mask[offset] =
+		adj_lane_mask << (csiphy_dev->soc_info.index *
+		(CAM_CSIPHY_MAX_DPHY_LANES + CAM_CSIPHY_MAX_CPHY_LANES) +
+		(!cam_cmd_csiphy_info->csiphy_3phase) *
+		(CAM_CSIPHY_MAX_CPHY_LANES));
+
+	return 0;
+}
+
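+/*
+ * Parse a CSL config packet: packet_handle plus offset locates the
+ * cam_packet header, cmd_buf_offset locates its command buffer
+ * descriptors, and the first descriptor's buffer carries the
+ * cam_csiphy_info payload applied below.
+ */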
+int32_t cam_cmd_buf_parser(struct csiphy_device *csiphy_dev,
+	struct cam_config_dev_cmd *cfg_dev)
+{
+	int32_t                 rc = 0;
+	uintptr_t                generic_ptr;
+	struct cam_packet       *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	uint32_t                *cmd_buf = NULL;
+	struct cam_csiphy_info  *cam_cmd_csiphy_info = NULL;
+	size_t                  len;
+
+	if (!cfg_dev || !csiphy_dev) {
+		CAM_ERR(CAM_CSIPHY, "Invalid Args");
+		return -EINVAL;
+	}
+
+	rc = cam_mem_get_cpu_buf((int32_t) cfg_dev->packet_handle,
+		&generic_ptr, &len);
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "Failed to get packet Mem address: %d", rc);
+		return rc;
+	}
+
+	if (cfg_dev->offset >= len) {
+		CAM_ERR(CAM_CSIPHY,
+			"offset is out of bounds: offset: %lld len: %zu",
+			cfg_dev->offset, len);
+		return -EINVAL;
+	}
+
+	csl_packet = (struct cam_packet *)
+		(generic_ptr + (uint32_t)cfg_dev->offset);
+
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&csl_packet->payload +
+		csl_packet->cmd_buf_offset / 4);
+
+	rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+		&generic_ptr, &len);
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY,
+			"Failed to get cmd buf Mem address : %d", rc);
+		return rc;
+	}
+
+	cmd_buf = (uint32_t *)generic_ptr;
+	cmd_buf += cmd_desc->offset / 4;
+	cam_cmd_csiphy_info = (struct cam_csiphy_info *)cmd_buf;
+
+	csiphy_dev->config_count++;
+	csiphy_dev->csiphy_info.lane_cnt += cam_cmd_csiphy_info->lane_cnt;
+	csiphy_dev->csiphy_info.lane_mask |= cam_cmd_csiphy_info->lane_mask;
+	csiphy_dev->csiphy_info.csiphy_3phase =
+		cam_cmd_csiphy_info->csiphy_3phase;
+	csiphy_dev->csiphy_info.combo_mode |= cam_cmd_csiphy_info->combo_mode;
+	if (cam_cmd_csiphy_info->combo_mode == 1)
+		csiphy_dev->csiphy_info.settle_time_combo_sensor =
+			cam_cmd_csiphy_info->settle_time;
+	else
+		csiphy_dev->csiphy_info.settle_time =
+			cam_cmd_csiphy_info->settle_time;
+	csiphy_dev->csiphy_info.data_rate = cam_cmd_csiphy_info->data_rate;
+
+	if (cam_cmd_csiphy_info->secure_mode == 1)
+		cam_csiphy_update_secure_info(csiphy_dev,
+			cam_cmd_csiphy_info, cfg_dev);
+
+	return rc;
+}
+
+void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev)
+{
+	int32_t i;
+	void __iomem *csiphybase =
+		csiphy_dev->soc_info.reg_map[0].mem_base;
+
+	for (i = 0; i < csiphy_dev->num_irq_registers; i++)
+		cam_io_w_mb(
+			csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_data,
+			csiphybase +
+			csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_addr);
+}
+
+void cam_csiphy_cphy_irq_disable(struct csiphy_device *csiphy_dev)
+{
+	int32_t i;
+	void __iomem *csiphybase =
+		csiphy_dev->soc_info.reg_map[0].mem_base;
+
+	for (i = 0; i < csiphy_dev->num_irq_registers; i++)
+		cam_io_w_mb(0x0, csiphybase +
+			csiphy_dev->ctrl_reg->csiphy_irq_reg[i].reg_addr);
+}
+
+irqreturn_t cam_csiphy_irq(int irq_num, void *data)
+{
+	uint32_t irq;
+	uint8_t i;
+	struct csiphy_device *csiphy_dev =
+		(struct csiphy_device *)data;
+	struct cam_hw_soc_info *soc_info = NULL;
+	struct csiphy_reg_parms_t *csiphy_reg = NULL;
+	void __iomem *base = NULL;
+
+	if (!csiphy_dev) {
+		CAM_ERR(CAM_CSIPHY, "Invalid Args");
+		return IRQ_NONE;
+	}
+
+	soc_info = &csiphy_dev->soc_info;
+	base =  csiphy_dev->soc_info.reg_map[0].mem_base;
+	csiphy_reg = &csiphy_dev->ctrl_reg->csiphy_reg;
+
+	for (i = 0; i < csiphy_dev->num_irq_registers; i++) {
+		irq = cam_io_r(base +
+			csiphy_reg->mipi_csiphy_interrupt_status0_addr +
+			(0x4 * i));
+		cam_io_w_mb(irq, base +
+			csiphy_reg->mipi_csiphy_interrupt_clear0_addr +
+			(0x4 * i));
+		CAM_ERR_RATE_LIMIT(CAM_CSIPHY,
+			"CSIPHY%d_IRQ_STATUS_ADDR%d = 0x%x",
+			soc_info->index, i, irq);
+		cam_io_w_mb(0x0, base +
+			csiphy_reg->mipi_csiphy_interrupt_clear0_addr +
+			(0x4 * i));
+	}
+	cam_io_w_mb(0x1, base + csiphy_reg->mipi_csiphy_glbl_irq_cmd_addr);
+	cam_io_w_mb(0x0, base + csiphy_reg->mipi_csiphy_glbl_irq_cmd_addr);
+
+	return IRQ_HANDLED;
+}
+
+int32_t cam_csiphy_config_dev(struct csiphy_device *csiphy_dev)
+{
+	int32_t      rc = 0;
+	uint32_t     lane_enable = 0, mask = 1, size = 0;
+	uint16_t     lane_mask = 0, i = 0, cfg_size = 0, temp = 0;
+	uint8_t      lane_cnt, lane_pos = 0;
+	uint16_t     settle_cnt = 0;
+	void __iomem *csiphybase;
+	struct csiphy_reg_t *csiphy_common_reg = NULL;
+	struct csiphy_reg_t (*reg_array)[MAX_SETTINGS_PER_LANE];
+
+	lane_cnt = csiphy_dev->csiphy_info.lane_cnt;
+	csiphybase = csiphy_dev->soc_info.reg_map[0].mem_base;
+
+	if (!csiphybase) {
+		CAM_ERR(CAM_CSIPHY, "csiphybase NULL");
+		return -EINVAL;
+	}
+
+	if (!csiphy_dev->csiphy_info.csiphy_3phase) {
+		if (csiphy_dev->csiphy_info.combo_mode == 1)
+			reg_array =
+				csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg;
+		else
+			reg_array =
+				csiphy_dev->ctrl_reg->csiphy_2ph_reg;
+		csiphy_dev->num_irq_registers = 11;
+		cfg_size =
+		csiphy_dev->ctrl_reg->csiphy_reg.csiphy_2ph_config_array_size;
+
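+		/*
+		 * Build the 2PH lane-enable field: lane-mask bit 0x2 is the
+		 * clock lane and maps to bit 7, while each data lane i maps
+		 * to bit (2 * i); the i-- keeps the data-lane index in step
+		 * when the clock-lane mask bit is consumed.
+		 */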
+		lane_mask = csiphy_dev->csiphy_info.lane_mask & LANE_MASK_2PH;
+		for (i = 0; i < MAX_DPHY_DATA_LN; i++) {
+			if (mask == 0x2) {
+				if (lane_mask & mask)
+					lane_enable |= 0x80;
+				i--;
+			} else if (lane_mask & mask) {
+				lane_enable |= 0x1 << (i<<1);
+			}
+			mask <<= 1;
+		}
+	} else {
+		if (csiphy_dev->csiphy_info.combo_mode == 1) {
+			if (csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg) {
+				reg_array =
+				csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg;
+			} else {
+				reg_array =
+					csiphy_dev->ctrl_reg->csiphy_3ph_reg;
+				CAM_ERR(CAM_CSIPHY,
+					"Unsupported configuration, falling back to CPHY mode");
+			}
+		} else {
+			reg_array =
+				csiphy_dev->ctrl_reg->csiphy_3ph_reg;
+		}
+		csiphy_dev->num_irq_registers = 11;
+		cfg_size =
+		csiphy_dev->ctrl_reg->csiphy_reg.csiphy_3ph_config_array_size;
+
+		lane_mask = csiphy_dev->csiphy_info.lane_mask & LANE_MASK_3PH;
+		mask = lane_mask;
+		while (mask != 0) {
+			temp = (i << 1)+1;
+			lane_enable |= ((mask & 0x1) << temp);
+			mask >>= 1;
+			i++;
+		}
+	}
+
+	size = csiphy_dev->ctrl_reg->csiphy_reg.csiphy_common_array_size;
+
+	for (i = 0; i < size; i++) {
+		csiphy_common_reg = &csiphy_dev->ctrl_reg->csiphy_common_reg[i];
+		switch (csiphy_common_reg->csiphy_param_type) {
+		case CSIPHY_LANE_ENABLE:
+			cam_io_w_mb(lane_enable,
+				csiphybase + csiphy_common_reg->reg_addr);
+			usleep_range(csiphy_common_reg->delay * 1000,
+				csiphy_common_reg->delay * 1000 + 10);
+			break;
+		case CSIPHY_DEFAULT_PARAMS:
+			cam_io_w_mb(csiphy_common_reg->reg_data,
+				csiphybase + csiphy_common_reg->reg_addr);
+			usleep_range(csiphy_common_reg->delay * 1000,
+				csiphy_common_reg->delay * 1000 + 10);
+			break;
+		case CSIPHY_2PH_REGS:
+			if (!csiphy_dev->csiphy_info.csiphy_3phase) {
+				cam_io_w_mb(csiphy_common_reg->reg_data,
+					csiphybase +
+					csiphy_common_reg->reg_addr);
+				usleep_range(csiphy_common_reg->delay * 1000,
+					csiphy_common_reg->delay * 1000 + 10);
+			}
+			break;
+		case CSIPHY_3PH_REGS:
+			if (csiphy_dev->csiphy_info.csiphy_3phase) {
+				cam_io_w_mb(csiphy_common_reg->reg_data,
+					csiphybase +
+					csiphy_common_reg->reg_addr);
+				usleep_range(csiphy_common_reg->delay * 1000,
+					csiphy_common_reg->delay * 1000 + 10);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+
+	while (lane_mask) {
+		if (!(lane_mask & 0x1)) {
+			lane_pos++;
+			lane_mask >>= 1;
+			continue;
+		}
+
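+		/*
+		 * Program the per-lane settings. The HW settle count is the
+		 * requested settle time scaled down by a fixed factor of
+		 * 200000000; lanes 3 and above in combo mode take the second
+		 * sensor's settle time instead.
+		 */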
+		settle_cnt = (csiphy_dev->csiphy_info.settle_time / 200000000);
+		if (csiphy_dev->csiphy_info.combo_mode == 1 &&
+			(lane_pos >= 3))
+			settle_cnt =
+			(csiphy_dev->csiphy_info.settle_time_combo_sensor /
+				200000000);
+		for (i = 0; i < cfg_size; i++) {
+			switch (reg_array[lane_pos][i].csiphy_param_type) {
+			case CSIPHY_LANE_ENABLE:
+				cam_io_w_mb(lane_enable,
+					csiphybase +
+					reg_array[lane_pos][i].reg_addr);
+				break;
+			case CSIPHY_DEFAULT_PARAMS:
+				cam_io_w_mb(reg_array[lane_pos][i].reg_data,
+					csiphybase +
+					reg_array[lane_pos][i].reg_addr);
+				break;
+			case CSIPHY_SETTLE_CNT_LOWER_BYTE:
+				cam_io_w_mb(settle_cnt & 0xFF,
+					csiphybase +
+					reg_array[lane_pos][i].reg_addr);
+				break;
+			case CSIPHY_SETTLE_CNT_HIGHER_BYTE:
+				cam_io_w_mb((settle_cnt >> 8) & 0xFF,
+					csiphybase +
+					reg_array[lane_pos][i].reg_addr);
+				break;
+			default:
+				CAM_DBG(CAM_CSIPHY, "Do Nothing");
+				break;
+			}
+			if (reg_array[lane_pos][i].delay > 0) {
+				usleep_range(reg_array[lane_pos][i].delay*1000,
+					reg_array[lane_pos][i].delay*1000 + 10);
+			}
+		}
+		lane_mask >>= 1;
+		lane_pos++;
+	}
+
+	cam_csiphy_cphy_irq_config(csiphy_dev);
+
+	return rc;
+}
+
+void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev)
+{
+	struct cam_hw_soc_info *soc_info;
+	int32_t i = 0;
+
+	if (csiphy_dev->csiphy_state == CAM_CSIPHY_INIT)
+		return;
+
+	if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
+		soc_info = &csiphy_dev->soc_info;
+
+		for (i = 0; i < csiphy_dev->acquire_count; i++) {
+			if (csiphy_dev->csiphy_info.secure_mode[i])
+				cam_csiphy_notify_secure_mode(
+					csiphy_dev,
+					CAM_SECURE_MODE_NON_SECURE, i);
+
+			csiphy_dev->csiphy_info.secure_mode[i] =
+				CAM_SECURE_MODE_NON_SECURE;
+
+			csiphy_dev->csiphy_cpas_cp_reg_mask[i] = 0;
+		}
+
+		cam_csiphy_reset(csiphy_dev);
+		cam_soc_util_disable_platform_resource(soc_info, true, true);
+
+		cam_cpas_stop(csiphy_dev->cpas_handle);
+		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
+	}
+
+	if (csiphy_dev->csiphy_state == CAM_CSIPHY_ACQUIRE) {
+		if (csiphy_dev->bridge_intf.device_hdl[0] != -1)
+			cam_destroy_device_hdl(
+				csiphy_dev->bridge_intf.device_hdl[0]);
+		if (csiphy_dev->bridge_intf.device_hdl[1] != -1)
+			cam_destroy_device_hdl(
+				csiphy_dev->bridge_intf.device_hdl[1]);
+		csiphy_dev->bridge_intf.device_hdl[0] = -1;
+		csiphy_dev->bridge_intf.device_hdl[1] = -1;
+		csiphy_dev->bridge_intf.link_hdl[0] = -1;
+		csiphy_dev->bridge_intf.link_hdl[1] = -1;
+		csiphy_dev->bridge_intf.session_hdl[0] = -1;
+		csiphy_dev->bridge_intf.session_hdl[1] = -1;
+	}
+
+	csiphy_dev->ref_count = 0;
+	csiphy_dev->is_acquired_dev_combo_mode = 0;
+	csiphy_dev->acquire_count = 0;
+	csiphy_dev->start_dev_count = 0;
+	csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
+}
+
+static int32_t cam_csiphy_external_cmd(struct csiphy_device *csiphy_dev,
+	struct cam_config_dev_cmd *p_submit_cmd)
+{
+	struct cam_csiphy_info cam_cmd_csiphy_info;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cam_cmd_csiphy_info,
+		u64_to_user_ptr(p_submit_cmd->packet_handle),
+		sizeof(struct cam_csiphy_info))) {
+		CAM_ERR(CAM_CSIPHY, "failed to copy cam_csiphy_info\n");
+		rc = -EFAULT;
+	} else {
+		csiphy_dev->csiphy_info.lane_cnt =
+			cam_cmd_csiphy_info.lane_cnt;
+		csiphy_dev->csiphy_info.lane_mask =
+			cam_cmd_csiphy_info.lane_mask;
+		csiphy_dev->csiphy_info.csiphy_3phase =
+			cam_cmd_csiphy_info.csiphy_3phase;
+		csiphy_dev->csiphy_info.combo_mode =
+			cam_cmd_csiphy_info.combo_mode;
+		csiphy_dev->csiphy_info.settle_time =
+			cam_cmd_csiphy_info.settle_time;
+		csiphy_dev->csiphy_info.data_rate =
+			cam_cmd_csiphy_info.data_rate;
+		CAM_DBG(CAM_CSIPHY,
+			"%s CONFIG_DEV_EXT settle_time= %lld lane_cnt=%d lane_mask=0x%x",
+			__func__,
+			csiphy_dev->csiphy_info.settle_time,
+			csiphy_dev->csiphy_info.lane_cnt,
+			csiphy_dev->csiphy_info.lane_mask);
+	}
+
+	return rc;
+}
+
+int32_t cam_csiphy_core_cfg(void *phy_dev,
+			void *arg)
+{
+	struct csiphy_device *csiphy_dev =
+		(struct csiphy_device *)phy_dev;
+	struct intf_params   *bridge_intf = NULL;
+	struct cam_control   *cmd = (struct cam_control *)arg;
+	int32_t              rc = 0;
+
+	if (!csiphy_dev || !cmd) {
+		CAM_ERR(CAM_CSIPHY, "Invalid input args");
+		return -EINVAL;
+	}
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_CSIPHY, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_CSIPHY, "Opcode received: %d", cmd->op_code);
+	mutex_lock(&csiphy_dev->mutex);
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev csiphy_acq_dev;
+		struct cam_csiphy_acquire_dev_info csiphy_acq_params;
+
+		struct cam_create_dev_hdl bridge_params;
+
+		if (copy_from_user(&csiphy_acq_dev,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(csiphy_acq_dev))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		csiphy_acq_params.combo_mode = 0;
+
+		if (copy_from_user(&csiphy_acq_params,
+			u64_to_user_ptr(csiphy_acq_dev.info_handle),
+			sizeof(csiphy_acq_params))) {
+			CAM_ERR(CAM_CSIPHY,
+				"Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		if (csiphy_dev->acquire_count == 2) {
+			CAM_ERR(CAM_CSIPHY,
+				"CSIPHY device does not allow more than 2 acquires");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if ((csiphy_acq_params.combo_mode == 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Combo Acq are not allowed: cm: %d, acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if ((csiphy_acq_params.combo_mode != 1) &&
+			(csiphy_dev->is_acquired_dev_combo_mode != 1) &&
+			(csiphy_dev->acquire_count == 1)) {
+			CAM_ERR(CAM_CSIPHY,
+				"Multiple Acquires are not allowed cm: %d acm: %d",
+				csiphy_acq_params.combo_mode,
+				csiphy_dev->is_acquired_dev_combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		bridge_params.ops = NULL;
+		bridge_params.session_hdl = csiphy_acq_dev.session_handle;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = csiphy_dev;
+
+		if (csiphy_acq_params.combo_mode >= 2) {
+			CAM_ERR(CAM_CSIPHY, "Invalid combo_mode %d",
+				csiphy_acq_params.combo_mode);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		csiphy_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		bridge_intf = &csiphy_dev->bridge_intf;
+		bridge_intf->device_hdl[csiphy_acq_params.combo_mode]
+			= csiphy_acq_dev.device_handle;
+		bridge_intf->session_hdl[csiphy_acq_params.combo_mode] =
+			csiphy_acq_dev.session_handle;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+				&csiphy_acq_dev,
+				sizeof(struct cam_sensor_acquire_dev))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		if (csiphy_acq_params.combo_mode == 1)
+			csiphy_dev->is_acquired_dev_combo_mode = 1;
+
+		csiphy_dev->acquire_count++;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
+	}
+		break;
+	case CAM_QUERY_CAP: {
+		struct cam_csiphy_query_cap csiphy_cap = {0};
+
+		cam_csiphy_query_cap(csiphy_dev, &csiphy_cap);
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&csiphy_cap, sizeof(struct cam_csiphy_query_cap))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+	}
+		break;
+	case CAM_STOP_DEV: {
+		int32_t offset;
+		struct cam_start_stop_dev_cmd config;
+
+		if (copy_from_user(&config, (void __user *)cmd->handle,
+				sizeof(config))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		if ((csiphy_dev->csiphy_state != CAM_CSIPHY_START) ||
+			!csiphy_dev->start_dev_count) {
+			CAM_ERR(CAM_CSIPHY, "Not in right state to stop : %d",
+				csiphy_dev->csiphy_state);
+			goto release_mutex;
+		}
+
+		offset = cam_csiphy_get_instance_offset(csiphy_dev,
+			config.dev_handle);
+		if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+			CAM_ERR(CAM_CSIPHY, "Invalid offset");
+			goto release_mutex;
+		}
+
+		if (--csiphy_dev->start_dev_count) {
+			CAM_DBG(CAM_CSIPHY, "Stop Dev ref Cnt: %d",
+				csiphy_dev->start_dev_count);
+			if (csiphy_dev->csiphy_info.secure_mode[offset])
+				cam_csiphy_notify_secure_mode(
+					csiphy_dev,
+					CAM_SECURE_MODE_NON_SECURE, offset);
+
+			csiphy_dev->csiphy_info.secure_mode[offset] =
+				CAM_SECURE_MODE_NON_SECURE;
+			csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0;
+
+			goto release_mutex;
+		}
+
+		if (csiphy_dev->csiphy_info.secure_mode[offset])
+			cam_csiphy_notify_secure_mode(
+				csiphy_dev,
+				CAM_SECURE_MODE_NON_SECURE, offset);
+
+		csiphy_dev->csiphy_info.secure_mode[offset] =
+			CAM_SECURE_MODE_NON_SECURE;
+
+		csiphy_dev->csiphy_cpas_cp_reg_mask[offset] = 0x0;
+
+		rc = cam_csiphy_disable_hw(csiphy_dev);
+		if (rc < 0)
+			CAM_ERR(CAM_CSIPHY, "Failed in csiphy release");
+
+		rc = cam_cpas_stop(csiphy_dev->cpas_handle);
+		if (rc < 0)
+			CAM_ERR(CAM_CSIPHY, "de-voting CPAS: %d", rc);
+
+		csiphy_dev->csiphy_state = CAM_CSIPHY_ACQUIRE;
+	}
+		break;
+	case CAM_RELEASE_DEV: {
+		struct cam_release_dev_cmd release;
+
+		if (!csiphy_dev->acquire_count) {
+			CAM_ERR(CAM_CSIPHY, "No valid devices to release");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (copy_from_user(&release,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(release))) {
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		rc = cam_destroy_device_hdl(release.dev_handle);
+		if (rc < 0)
+			CAM_ERR(CAM_CSIPHY, "destroying the device hdl");
+		if (release.dev_handle ==
+			csiphy_dev->bridge_intf.device_hdl[0]) {
+			csiphy_dev->bridge_intf.device_hdl[0] = -1;
+			csiphy_dev->bridge_intf.link_hdl[0] = -1;
+			csiphy_dev->bridge_intf.session_hdl[0] = -1;
+		} else {
+			csiphy_dev->bridge_intf.device_hdl[1] = -1;
+			csiphy_dev->bridge_intf.link_hdl[1] = -1;
+			csiphy_dev->bridge_intf.session_hdl[1] = -1;
+			csiphy_dev->is_acquired_dev_combo_mode = 0;
+		}
+
+		csiphy_dev->config_count--;
+		csiphy_dev->acquire_count--;
+
+		if (csiphy_dev->acquire_count == 0)
+			csiphy_dev->csiphy_state = CAM_CSIPHY_INIT;
+
+		if (csiphy_dev->config_count == 0) {
+			CAM_DBG(CAM_CSIPHY, "reset csiphy_info");
+			csiphy_dev->csiphy_info.lane_mask = 0;
+			csiphy_dev->csiphy_info.lane_cnt = 0;
+			csiphy_dev->csiphy_info.combo_mode = 0;
+		}
+	}
+		break;
+	case CAM_CONFIG_DEV: {
+		struct cam_config_dev_cmd config;
+
+		if (copy_from_user(&config,
+			u64_to_user_ptr(cmd->handle),
+					sizeof(config))) {
+			rc = -EFAULT;
+		} else {
+			rc = cam_cmd_buf_parser(csiphy_dev, &config);
+			if (rc < 0) {
+				CAM_ERR(CAM_CSIPHY, "Failed in cmd buf parser");
+				goto release_mutex;
+			}
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		struct cam_ahb_vote ahb_vote;
+		struct cam_axi_vote axi_vote;
+		struct cam_start_stop_dev_cmd config;
+		int32_t offset;
+
+		if (copy_from_user(&config, u64_to_user_ptr(cmd->handle),
+			sizeof(config))) {
+			CAM_ERR(CAM_CSIPHY, "Failed copying from User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		if (csiphy_dev->csiphy_state == CAM_CSIPHY_START) {
+			csiphy_dev->start_dev_count++;
+			goto release_mutex;
+		}
+
+		offset = cam_csiphy_get_instance_offset(csiphy_dev,
+			config.dev_handle);
+		if (offset < 0 || offset >= CSIPHY_MAX_INSTANCES) {
+			CAM_ERR(CAM_CSIPHY, "Invalid offset");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		ahb_vote.type = CAM_VOTE_ABSOLUTE;
+		ahb_vote.vote.level = CAM_SVS_VOTE;
+		axi_vote.compressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+		axi_vote.uncompressed_bw = CAM_CPAS_DEFAULT_AXI_BW;
+
+		rc = cam_cpas_start(csiphy_dev->cpas_handle,
+			&ahb_vote, &axi_vote);
+		if (rc < 0) {
+			CAM_ERR(CAM_CSIPHY, "voting CPAS: %d", rc);
+			goto release_mutex;
+		}
+
+		if (csiphy_dev->csiphy_info.secure_mode[offset] == 1) {
+			rc = cam_csiphy_notify_secure_mode(
+				csiphy_dev,
+				CAM_SECURE_MODE_SECURE, offset);
+			if (rc < 0) {
+				csiphy_dev->csiphy_info.secure_mode[offset] =
+					CAM_SECURE_MODE_NON_SECURE;
+				goto release_mutex;
+			}
+		}
+
+		rc = cam_csiphy_enable_hw(csiphy_dev);
+		if (rc != 0) {
+			CAM_ERR(CAM_CSIPHY, "cam_csiphy_enable_hw failed");
+			cam_cpas_stop(csiphy_dev->cpas_handle);
+			goto release_mutex;
+		}
+		rc = cam_csiphy_config_dev(csiphy_dev);
+		if (csiphy_dump == 1)
+			cam_csiphy_mem_dmp(&csiphy_dev->soc_info);
+
+		if (rc < 0) {
+			CAM_ERR(CAM_CSIPHY, "cam_csiphy_config_dev failed");
+			cam_csiphy_disable_hw(csiphy_dev);
+			cam_cpas_stop(csiphy_dev->cpas_handle);
+			goto release_mutex;
+		}
+		csiphy_dev->start_dev_count++;
+		csiphy_dev->csiphy_state = CAM_CSIPHY_START;
+	}
+		break;
+	case CAM_CONFIG_DEV_EXTERNAL: {
+		struct cam_config_dev_cmd submit_cmd;
+
+		if (copy_from_user(&submit_cmd,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(struct cam_config_dev_cmd))) {
+			CAM_ERR(CAM_CSIPHY, "failed copy config ext\n");
+			rc = -EFAULT;
+		} else {
+			rc = cam_csiphy_external_cmd(csiphy_dev, &submit_cmd);
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_CSIPHY, "Invalid Opcode: %d", cmd->op_code);
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+
+release_mutex:
+	mutex_unlock(&csiphy_dev->mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.h
new file mode 100644
index 0000000..23f4b9b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_core.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_CORE_H_
+#define _CAM_CSIPHY_CORE_H_
+
+#include <linux/irqreturn.h>
+#include "cam_csiphy_dev.h"
+#include <cam_mem_mgr.h>
+#include <cam_req_mgr_util.h>
+#include <cam_io_util.h>
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API programs CSIPhy IRQ registers
+ */
+void cam_csiphy_cphy_irq_config(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API resets CSIPhy hardware
+ */
+void cam_csiphy_reset(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ * @arg:    Camera control command argument
+ *
+ * This API handles the camera control commands sent to CSIPhy
+ */
+int cam_csiphy_core_cfg(void *csiphy_dev, void *arg);
+
+/**
+ * @irq_num: IRQ number
+ * @data: CSIPhy device structure
+ *
+ * This API handles CSIPhy IRQs
+ */
+irqreturn_t cam_csiphy_irq(int irq_num, void *data);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API handles the CSIPhy close
+ */
+void cam_csiphy_shutdown(struct csiphy_device *csiphy_dev);
+
+#endif /* _CAM_CSIPHY_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
new file mode 100644
index 0000000..75dd548
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_csiphy_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_csiphy_soc.h"
+#include "cam_csiphy_core.h"
+#include <media/cam_sensor.h>
+
+static long cam_csiphy_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	struct csiphy_device *csiphy_dev = v4l2_get_subdevdata(sd);
+	int rc = 0;
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_csiphy_core_cfg(csiphy_dev, arg);
+		if (rc != 0) {
+			CAM_ERR(CAM_CSIPHY, "Failed in configuring the device");
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_CSIPHY, "Wrong ioctl : %d", cmd);
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_csiphy_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct csiphy_device *csiphy_dev =
+		v4l2_get_subdevdata(sd);
+
+	if (!csiphy_dev) {
+		CAM_ERR(CAM_CSIPHY, "csiphy_dev ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&csiphy_dev->mutex);
+	cam_csiphy_shutdown(csiphy_dev);
+	mutex_unlock(&csiphy_dev->mutex);
+
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_csiphy_subdev_compat_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	int32_t rc = 0;
+	struct cam_control cmd_data;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_CSIPHY, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	/* All the arguments are converted to 64 bit here and
+	 * then passed to the API in core.c
+	 */
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_csiphy_subdev_ioctl(sd, cmd, &cmd_data);
+		break;
+	default:
+		CAM_ERR(CAM_CSIPHY, "Invalid compat ioctl cmd: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_CSIPHY,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+static struct v4l2_subdev_core_ops csiphy_subdev_core_ops = {
+	.ioctl = cam_csiphy_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_csiphy_subdev_compat_ioctl,
+#endif
+};
+
+static const struct v4l2_subdev_ops csiphy_subdev_ops = {
+	.core = &csiphy_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops csiphy_subdev_intern_ops = {
+	.close = cam_csiphy_subdev_close,
+};
+
+static int32_t cam_csiphy_platform_probe(struct platform_device *pdev)
+{
+	struct cam_cpas_register_params cpas_parms;
+	struct csiphy_device *new_csiphy_dev;
+	int32_t              rc = 0;
+
+	new_csiphy_dev = devm_kzalloc(&pdev->dev,
+		sizeof(struct csiphy_device), GFP_KERNEL);
+	if (!new_csiphy_dev)
+		return -ENOMEM;
+
+	new_csiphy_dev->ctrl_reg = kzalloc(sizeof(struct csiphy_ctrl_t),
+		GFP_KERNEL);
+	if (!new_csiphy_dev->ctrl_reg) {
+		devm_kfree(&pdev->dev, new_csiphy_dev);
+		return -ENOMEM;
+	}
+
+	mutex_init(&new_csiphy_dev->mutex);
+	new_csiphy_dev->v4l2_dev_str.pdev = pdev;
+
+	new_csiphy_dev->soc_info.pdev = pdev;
+	new_csiphy_dev->soc_info.dev = &pdev->dev;
+	new_csiphy_dev->soc_info.dev_name = pdev->name;
+	new_csiphy_dev->ref_count = 0;
+
+	rc = cam_csiphy_parse_dt_info(pdev, new_csiphy_dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "DT parsing failed: %d", rc);
+		goto csiphy_no_resource;
+	}
+
+	new_csiphy_dev->v4l2_dev_str.internal_ops =
+		&csiphy_subdev_intern_ops;
+	new_csiphy_dev->v4l2_dev_str.ops =
+		&csiphy_subdev_ops;
+	strlcpy(new_csiphy_dev->device_name, CAMX_CSIPHY_DEV_NAME,
+		sizeof(new_csiphy_dev->device_name));
+	new_csiphy_dev->v4l2_dev_str.name =
+		new_csiphy_dev->device_name;
+	new_csiphy_dev->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	new_csiphy_dev->v4l2_dev_str.ent_function =
+		CAM_CSIPHY_DEVICE_TYPE;
+	new_csiphy_dev->v4l2_dev_str.token =
+		new_csiphy_dev;
+
+	rc = cam_register_subdev(&(new_csiphy_dev->v4l2_dev_str));
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "cam_register_subdev Failed rc: %d", rc);
+		goto csiphy_no_resource;
+	}
+
+	platform_set_drvdata(pdev, &(new_csiphy_dev->v4l2_dev_str.sd));
+	v4l2_set_subdevdata(&(new_csiphy_dev->v4l2_dev_str.sd), new_csiphy_dev);
+
+	new_csiphy_dev->bridge_intf.device_hdl[0] = -1;
+	new_csiphy_dev->bridge_intf.device_hdl[1] = -1;
+	new_csiphy_dev->bridge_intf.ops.get_dev_info =
+		NULL;
+	new_csiphy_dev->bridge_intf.ops.link_setup =
+		NULL;
+	new_csiphy_dev->bridge_intf.ops.apply_req =
+		NULL;
+
+	new_csiphy_dev->acquire_count = 0;
+	new_csiphy_dev->start_dev_count = 0;
+	new_csiphy_dev->is_acquired_dev_combo_mode = 0;
+
+	cpas_parms.cam_cpas_client_cb = NULL;
+	cpas_parms.cell_index = new_csiphy_dev->soc_info.index;
+	cpas_parms.dev = &pdev->dev;
+	cpas_parms.userdata = new_csiphy_dev;
+
+	strlcpy(cpas_parms.identifier, "csiphy", CAM_HW_IDENTIFIER_LENGTH);
+	rc = cam_cpas_register_client(&cpas_parms);
+	if (rc) {
+		CAM_ERR(CAM_CSIPHY, "CPAS registration failed rc: %d", rc);
+		goto csiphy_no_resource;
+	}
+	CAM_DBG(CAM_CSIPHY, "CPAS registration successful handle=%d",
+		cpas_parms.client_handle);
+	new_csiphy_dev->cpas_handle = cpas_parms.client_handle;
+
+	return rc;
+csiphy_no_resource:
+	mutex_destroy(&new_csiphy_dev->mutex);
+	kfree(new_csiphy_dev->ctrl_reg);
+	devm_kfree(&pdev->dev, new_csiphy_dev);
+	return rc;
+}
+
+static int32_t cam_csiphy_device_remove(struct platform_device *pdev)
+{
+	struct v4l2_subdev *subdev =
+		platform_get_drvdata(pdev);
+	struct csiphy_device *csiphy_dev =
+		v4l2_get_subdevdata(subdev);
+
+	cam_cpas_unregister_client(csiphy_dev->cpas_handle);
+	cam_csiphy_soc_release(csiphy_dev);
+	kfree(csiphy_dev->ctrl_reg);
+	devm_kfree(&pdev->dev, csiphy_dev);
+
+	return 0;
+}
+
+static const struct of_device_id cam_csiphy_dt_match[] = {
+	{.compatible = "qcom,csiphy"},
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, cam_csiphy_dt_match);
+
+static struct platform_driver csiphy_driver = {
+	.probe = cam_csiphy_platform_probe,
+	.remove = cam_csiphy_device_remove,
+	.driver = {
+		.name = CAMX_CSIPHY_DEV_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = cam_csiphy_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int32_t __init cam_csiphy_init_module(void)
+{
+	return platform_driver_register(&csiphy_driver);
+}
+
+static void __exit cam_csiphy_exit_module(void)
+{
+	platform_driver_unregister(&csiphy_driver);
+}
+
+module_init(cam_csiphy_init_module);
+module_exit(cam_csiphy_exit_module);
+MODULE_DESCRIPTION("CAM CSIPHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
new file mode 100644
index 0000000..b87e5f7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_dev.h
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_DEV_H_
+#define _CAM_CSIPHY_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_defs.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_subdev.h>
+#include <cam_io_util.h>
+#include <cam_cpas_api.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+#define MAX_CSIPHY                  3
+#define MAX_DPHY_DATA_LN            4
+#define MAX_LRME_V4l2_EVENTS        30
+#define CSIPHY_NUM_CLK_MAX          16
+#define MAX_CSIPHY_REG_ARRAY        70
+#define MAX_CSIPHY_CMN_REG_ARRAY    5
+
+#define MAX_LANES             5
+#define MAX_SETTINGS_PER_LANE 43
+
+#define MAX_REGULATOR         5
+#define CAMX_CSIPHY_DEV_NAME "cam-csiphy-driver"
+
+#define CSIPHY_POWER_UP       0
+#define CSIPHY_POWER_DOWN     1
+
+#define CSIPHY_DEFAULT_PARAMS            0
+#define CSIPHY_LANE_ENABLE               1
+#define CSIPHY_SETTLE_CNT_LOWER_BYTE     2
+#define CSIPHY_SETTLE_CNT_HIGHER_BYTE    3
+#define CSIPHY_DNP_PARAMS                4
+#define CSIPHY_2PH_REGS                  5
+#define CSIPHY_3PH_REGS                  6
+
+#define CSIPHY_MAX_INSTANCES     2
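+
+/*
+ * A reading note derived from cam_csiphy_core_cfg: index 0 of the
+ * per-instance handle arrays is used for the default acquire, while
+ * index 1 is only populated when the device is acquired in combo mode.
+ */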
+
+#define CAM_CSIPHY_MAX_DPHY_LANES    4
+#define CAM_CSIPHY_MAX_CPHY_LANES    3
+
+#define ENABLE_IRQ false
+
+#undef CDBG
+#ifdef CAM_CSIPHY_CORE_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+enum cam_csiphy_state {
+	CAM_CSIPHY_INIT,
+	CAM_CSIPHY_ACQUIRE,
+	CAM_CSIPHY_START,
+};
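+
+/*
+ * A note on the lifecycle, as implemented in cam_csiphy_core_cfg: the
+ * zero-initialized device starts in CAM_CSIPHY_INIT; CAM_ACQUIRE_DEV
+ * moves it to CAM_CSIPHY_ACQUIRE, CAM_START_DEV to CAM_CSIPHY_START,
+ * CAM_STOP_DEV back to CAM_CSIPHY_ACQUIRE, and CAM_RELEASE_DEV back to
+ * CAM_CSIPHY_INIT once the last acquired handle goes away.
+ */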
+
+/**
+ * struct csiphy_reg_parms_t
+ * @mipi_csiphy_glbl_irq_cmd_addr: CSIPhy irq addr
+ * @mipi_csiphy_interrupt_status0_addr:
+ *     CSIPhy interrupt status addr
+ * @mipi_csiphy_interrupt_mask0_addr:
+ *     CSIPhy interrupt mask addr
+ * @mipi_csiphy_interrupt_mask_val:
+ *     CSIPhy interrupt mask val
+ * @mipi_csiphy_interrupt_mask_addr:
+ *     CSIPhy interrupt mask addr
+ * @mipi_csiphy_interrupt_clear0_addr:
+ *     CSIPhy interrupt clear addr
+ * @csiphy_version: CSIPhy Version
+ * @csiphy_common_array_size: CSIPhy common array size
+ * @csiphy_reset_array_size: CSIPhy reset array size
+ * @csiphy_2ph_config_array_size: 2ph settings size
+ * @csiphy_3ph_config_array_size: 3ph settings size
+ * @csiphy_cpas_cp_bits_per_phy: CP bits per phy
+ * @csiphy_cpas_cp_is_interleaved: checks whether cp bits
+ *      are interleaved or not
+ * @csiphy_cpas_cp_2ph_offset: cp register 2ph offset
+ * @csiphy_cpas_cp_3ph_offset: cp register 3ph offset
+ * @csiphy_2ph_clock_lane: clock lane in 2ph
+ * @csiphy_2ph_combo_ck_ln: clk lane in combo 2ph
+ */
+struct csiphy_reg_parms_t {
+/*MIPI CSI PHY registers*/
+	uint32_t mipi_csiphy_glbl_irq_cmd_addr;
+	uint32_t mipi_csiphy_interrupt_status0_addr;
+	uint32_t mipi_csiphy_interrupt_mask0_addr;
+	uint32_t mipi_csiphy_interrupt_mask_val;
+	uint32_t mipi_csiphy_interrupt_mask_addr;
+	uint32_t mipi_csiphy_interrupt_clear0_addr;
+	uint32_t csiphy_version;
+	uint32_t csiphy_common_array_size;
+	uint32_t csiphy_reset_array_size;
+	uint32_t csiphy_2ph_config_array_size;
+	uint32_t csiphy_3ph_config_array_size;
+	uint32_t csiphy_cpas_cp_bits_per_phy;
+	uint32_t csiphy_cpas_cp_is_interleaved;
+	uint32_t csiphy_cpas_cp_2ph_offset;
+	uint32_t csiphy_cpas_cp_3ph_offset;
+	uint32_t csiphy_2ph_clock_lane;
+	uint32_t csiphy_2ph_combo_ck_ln;
+};
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @link_hdl: Link Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+	int32_t device_hdl[CSIPHY_MAX_INSTANCES];
+	int32_t session_hdl[CSIPHY_MAX_INSTANCES];
+	int32_t link_hdl[CSIPHY_MAX_INSTANCES];
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct csiphy_reg_t
+ * @reg_addr: Register address
+ * @reg_data: Register data
+ * @delay: Delay
+ * @csiphy_param_type: CSIPhy parameter type
+ */
+struct csiphy_reg_t {
+	int32_t  reg_addr;
+	int32_t  reg_data;
+	int32_t  delay;
+	uint32_t csiphy_param_type;
+};
+
+/**
+ * struct csiphy_ctrl_t
+ * @csiphy_reg: Register address
+ * @csiphy_common_reg: Common register set
+ * @csiphy_irq_reg: IRQ register set
+ * @csiphy_reset_reg: Reset register set
+ * @csiphy_2ph_reg: 2phase register set
+ * @csiphy_2ph_combo_mode_reg:
+ *     2phase combo register set
+ * @csiphy_3ph_reg: 3phase register set
+ * @csiphy_2ph_3ph_mode_reg:
+ *     2 phase 3phase combo register set
+ */
+struct csiphy_ctrl_t {
+	struct csiphy_reg_parms_t csiphy_reg;
+	struct csiphy_reg_t *csiphy_common_reg;
+	struct csiphy_reg_t *csiphy_irq_reg;
+	struct csiphy_reg_t *csiphy_reset_reg;
+	struct csiphy_reg_t (*csiphy_2ph_reg)[MAX_SETTINGS_PER_LANE];
+	struct csiphy_reg_t (*csiphy_2ph_combo_mode_reg)[MAX_SETTINGS_PER_LANE];
+	struct csiphy_reg_t (*csiphy_3ph_reg)[MAX_SETTINGS_PER_LANE];
+	struct csiphy_reg_t (*csiphy_2ph_3ph_mode_reg)[MAX_SETTINGS_PER_LANE];
+};
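+
+/*
+ * The (*...)[MAX_SETTINGS_PER_LANE] members above point at per-version
+ * tables (see include/cam_csiphy_*_hwreg.h) indexed as table[lane][setting];
+ * csiphy_reg.csiphy_2ph_config_array_size and csiphy_3ph_config_array_size
+ * record how many of the MAX_SETTINGS_PER_LANE slots in each row are
+ * actually populated.
+ */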
+
+/**
+ * cam_csiphy_param: Provides cmdbuffer structure
+ * @lane_mask     :  Lane mask details
+ * @lane_assign   :  Lane sensor will be using
+ * @csiphy_3phase :  Mentions DPHY or CPHY
+ * @combo_mode    :  Indicates whether combo mode is enabled
+ * @lane_cnt      :  Total number of lanes
+ * @secure_mode   :  Secure mode state per phy instance
+ * @settle_time   :  Settling time in ms
+ * @settle_time_combo_sensor   :  Settling time in ms
+ * @data_rate     :  Data rate in mbps
+ */
+struct cam_csiphy_param {
+	uint16_t    lane_mask;
+	uint16_t    lane_assign;
+	uint8_t     csiphy_3phase;
+	uint8_t     combo_mode;
+	uint8_t     lane_cnt;
+	uint8_t     secure_mode[CSIPHY_MAX_INSTANCES];
+	uint64_t    settle_time;
+	uint64_t    settle_time_combo_sensor;
+	uint64_t    data_rate;
+};
+
+/**
+ * struct csiphy_device
+ * @mutex: Device mutex
+ * @hw_version: Hardware Version
+ * @csiphy_state: CSIPhy state
+ * @ctrl_reg: CSIPhy control registers
+ * @csiphy_max_clk: Max timer clock rate
+ * @csiphy_3p_clk_info: 3Phase clock information
+ * @csiphy_3p_clk: 3Phase clocks structure
+ * @csi_3phase: Is it a 3Phase mode
+ * @ref_count: Reference count
+ * @lane_mask: Lane mask per CSIPhy
+ * @is_csiphy_3phase_hw: Is it 3Phase capable hardware
+ * @num_irq_registers: Number of IRQ registers
+ * @v4l2_dev_str: V4L2 device structure
+ * @csiphy_info: CSIPhy parameters from the cmd buffer
+ * @bridge_intf: Bridge interface parameters
+ * @clk_lane: Clock lane
+ * @acquire_count: Acquire device count
+ * @start_dev_count: Start count
+ * @device_name: Device name
+ * @is_acquired_dev_combo_mode:
+ *    Flag that mentions whether already acquired
+ *    device is for combo mode
+ * @soc_info: SOC information
+ * @cpas_handle: CPAS handle
+ * @config_count: Config reg count
+ * @csiphy_cpas_cp_reg_mask: CP reg mask for phy instance
+ */
+struct csiphy_device {
+	struct mutex mutex;
+	uint32_t hw_version;
+	enum cam_csiphy_state csiphy_state;
+	struct csiphy_ctrl_t *ctrl_reg;
+	uint32_t csiphy_max_clk;
+	struct msm_cam_clk_info csiphy_3p_clk_info[2];
+	struct clk *csiphy_3p_clk[2];
+	unsigned char csi_3phase;
+	int32_t ref_count;
+	uint16_t lane_mask[MAX_CSIPHY];
+	uint8_t is_csiphy_3phase_hw;
+	uint8_t num_irq_registers;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_csiphy_param csiphy_info;
+	struct intf_params bridge_intf;
+	uint32_t clk_lane;
+	uint32_t acquire_count;
+	uint32_t start_dev_count;
+	char device_name[20];
+	uint32_t is_acquired_dev_combo_mode;
+	struct cam_hw_soc_info   soc_info;
+	uint32_t cpas_handle;
+	uint32_t config_count;
+	uint64_t csiphy_cpas_cp_reg_mask[CSIPHY_MAX_INSTANCES];
+};
+
+#endif /* _CAM_CSIPHY_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
new file mode 100644
index 0000000..fcab25d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_csiphy_soc.h"
+#include "cam_csiphy_core.h"
+#include "include/cam_csiphy_1_1_hwreg.h"
+#include "include/cam_csiphy_1_0_hwreg.h"
+#include "include/cam_csiphy_1_2_hwreg.h"
+#include "include/cam_csiphy_2_0_hwreg.h"
+
+#define BYTES_PER_REGISTER           4
+#define NUM_REGISTER_PER_LINE        4
+#define REG_OFFSET(__start, __i)    ((__start) + ((__i) * BYTES_PER_REGISTER))
+
+static int cam_io_phy_dump(void __iomem *base_addr,
+	uint32_t start_offset, int size)
+{
+	char          line_str[128];
+	char         *p_str;
+	int           i;
+	uint32_t      data;
+
+	if (!base_addr || (size <= 0))
+		return -EINVAL;
+
+	CAM_INFO(CAM_CSIPHY, "addr=%pK offset=0x%x size=%d",
+		base_addr, start_offset, size);
+
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size; i++) {
+		if (i % NUM_REGISTER_PER_LINE == 0) {
+			snprintf(p_str, 12, "0x%08x: ",
+				REG_OFFSET(start_offset, i));
+			p_str += 11;
+		}
+		data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+		snprintf(p_str, 9, "%08x ", data);
+		p_str += 8;
+		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+			CAM_ERR(CAM_CSIPHY, "%s", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		CAM_ERR(CAM_CSIPHY, "%s", line_str);
+
+	return 0;
+}
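+
+/*
+ * For illustration, each line logged above carries the starting offset
+ * followed by NUM_REGISTER_PER_LINE register values, e.g. (values are
+ * hypothetical):
+ *   0x00000010: 00000060 00000000 0000000a 00000000
+ */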
+
+int32_t cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info)
+{
+	int32_t rc = 0;
+	resource_size_t size = 0;
+	void __iomem *addr = NULL;
+
+	if (!soc_info) {
+		rc = -EINVAL;
+		CAM_ERR(CAM_CSIPHY, "invalid input %d", rc);
+		return rc;
+	}
+	addr = soc_info->reg_map[0].mem_base;
+	size = resource_size(soc_info->mem_block[0]);
+	rc = cam_io_phy_dump(addr, 0, (size >> 2));
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "generating dump failed %d", rc);
+		return rc;
+	}
+	return rc;
+}
+
+int32_t cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev)
+{
+	int32_t rc = 0;
+	struct cam_hw_soc_info   *soc_info;
+
+	soc_info = &csiphy_dev->soc_info;
+
+	if (csiphy_dev->ref_count++) {
+		CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+			csiphy_dev->ref_count);
+		return rc;
+	}
+
+	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+		CAM_SVS_VOTE, ENABLE_IRQ);
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "failed to enable platform resources %d",
+			rc);
+		return rc;
+	}
+
+	rc = cam_soc_util_set_src_clk_rate(soc_info,
+		soc_info->clk_rate[0][soc_info->src_clk_idx]);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "csiphy_clk_set_rate failed rc: %d", rc);
+		goto csiphy_disable_platform_resource;
+	}
+
+	cam_csiphy_reset(csiphy_dev);
+
+	return rc;
+
+csiphy_disable_platform_resource:
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+
+	return rc;
+}
+
+int32_t cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev)
+{
+	struct cam_hw_soc_info   *soc_info;
+
+	if (!csiphy_dev || !csiphy_dev->ref_count) {
+		CAM_ERR(CAM_CSIPHY, "csiphy dev NULL / ref_count ZERO");
+		return 0;
+	}
+	soc_info = &csiphy_dev->soc_info;
+
+	if (--csiphy_dev->ref_count) {
+		CAM_ERR(CAM_CSIPHY, "csiphy refcount = %d",
+			csiphy_dev->ref_count);
+		return 0;
+	}
+
+	cam_csiphy_reset(csiphy_dev);
+
+	cam_soc_util_disable_platform_resource(soc_info, true, true);
+
+	return 0;
+}
+
+int32_t cam_csiphy_parse_dt_info(struct platform_device *pdev,
+	struct csiphy_device *csiphy_dev)
+{
+	int32_t   rc = 0, i = 0;
+	uint32_t  clk_cnt = 0;
+	char      *csi_3p_clk_name = "csi_phy_3p_clk";
+	char      *csi_3p_clk_src_name = "csiphy_3p_clk_src";
+	struct cam_hw_soc_info   *soc_info;
+
+	csiphy_dev->is_csiphy_3phase_hw = 0;
+	soc_info = &csiphy_dev->soc_info;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_CSIPHY, "parsing common soc dt failed (rc %d)", rc);
+		return rc;
+	}
+
+	if (of_device_is_compatible(soc_info->dev->of_node,
+		"qcom,csiphy-v1.0")) {
+		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_0_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+				csiphy_2ph_v1_0_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v1_0_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg =
+				csiphy_3ph_v1_0_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_0;
+		csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_common_reg_1_0;
+		csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_reset_reg_1_0;
+		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_0;
+		csiphy_dev->hw_version = CSIPHY_VERSION_V10;
+		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+		csiphy_dev->clk_lane = 0;
+	} else if (of_device_is_compatible(soc_info->dev->of_node,
+		"qcom,csiphy-v1.1")) {
+		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_1_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+			csiphy_2ph_v1_1_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v1_1_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg =
+			csiphy_3ph_v1_1_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_1;
+		csiphy_dev->ctrl_reg->csiphy_common_reg =
+			csiphy_common_reg_1_1;
+		csiphy_dev->ctrl_reg->csiphy_reset_reg =
+			csiphy_reset_reg_1_1;
+		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_1;
+		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+		csiphy_dev->hw_version = CSIPHY_VERSION_V11;
+		csiphy_dev->clk_lane = 0;
+	} else if (of_device_is_compatible(soc_info->dev->of_node,
+		"qcom,csiphy-v1.2")) {
+		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v1_2_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+			csiphy_2ph_v1_2_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v1_2_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg = NULL;
+		csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_1_2;
+		csiphy_dev->ctrl_reg->csiphy_common_reg =
+			csiphy_common_reg_1_2;
+		csiphy_dev->ctrl_reg->csiphy_reset_reg =
+			csiphy_reset_reg_1_2;
+		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v1_2;
+		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+		csiphy_dev->hw_version = CSIPHY_VERSION_V12;
+		csiphy_dev->clk_lane = 0;
+	} else if (of_device_is_compatible(soc_info->dev->of_node,
+		"qcom,csiphy-v2.0")) {
+		csiphy_dev->ctrl_reg->csiphy_2ph_reg = csiphy_2ph_v2_0_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_combo_mode_reg =
+			csiphy_2ph_v2_0_combo_mode_reg;
+		csiphy_dev->ctrl_reg->csiphy_3ph_reg = csiphy_3ph_v2_0_reg;
+		csiphy_dev->ctrl_reg->csiphy_2ph_3ph_mode_reg = NULL;
+		csiphy_dev->ctrl_reg->csiphy_irq_reg = csiphy_irq_reg_2_0;
+		csiphy_dev->ctrl_reg->csiphy_common_reg = csiphy_common_reg_2_0;
+		csiphy_dev->ctrl_reg->csiphy_reset_reg = csiphy_reset_reg_2_0;
+		csiphy_dev->ctrl_reg->csiphy_reg = csiphy_v2_0;
+		csiphy_dev->hw_version = CSIPHY_VERSION_V20;
+		csiphy_dev->is_csiphy_3phase_hw = CSI_3PHASE_HW;
+		csiphy_dev->clk_lane = 0;
+	} else {
+		CAM_ERR(CAM_CSIPHY,
+			"no compatible csiphy hw version found");
+		rc =  -EINVAL;
+		return rc;
+	}
+
+	if (soc_info->num_clk > CSIPHY_NUM_CLK_MAX) {
+		CAM_ERR(CAM_CSIPHY, "invalid clk count=%d, max is %d",
+			soc_info->num_clk, CSIPHY_NUM_CLK_MAX);
+		return -EINVAL;
+	}
+	for (i = 0; i < soc_info->num_clk; i++) {
+		if (!strcmp(soc_info->clk_name[i],
+			csi_3p_clk_src_name)) {
+			csiphy_dev->csiphy_3p_clk_info[0].clk_name =
+				soc_info->clk_name[i];
+			csiphy_dev->csiphy_3p_clk_info[0].clk_rate =
+				soc_info->clk_rate[0][i];
+			csiphy_dev->csiphy_3p_clk[0] =
+				soc_info->clk[i];
+			continue;
+		} else if (!strcmp(soc_info->clk_name[i],
+				csi_3p_clk_name)) {
+			csiphy_dev->csiphy_3p_clk_info[1].clk_name =
+				soc_info->clk_name[i];
+			csiphy_dev->csiphy_3p_clk_info[1].clk_rate =
+				soc_info->clk_rate[0][i];
+			csiphy_dev->csiphy_3p_clk[1] =
+				soc_info->clk[i];
+			continue;
+		}
+
+		CAM_DBG(CAM_CSIPHY, "clk_rate[%d] = %d", clk_cnt,
+			soc_info->clk_rate[0][clk_cnt]);
+		clk_cnt++;
+	}
+
+	csiphy_dev->csiphy_max_clk =
+		soc_info->clk_rate[0][soc_info->src_clk_idx];
+
+	rc = cam_soc_util_request_platform_resource(&csiphy_dev->soc_info,
+		cam_csiphy_irq, csiphy_dev);
+
+	return rc;
+}
+
+int32_t cam_csiphy_soc_release(struct csiphy_device *csiphy_dev)
+{
+	if (!csiphy_dev) {
+		CAM_ERR(CAM_CSIPHY, "csiphy dev NULL");
+		return 0;
+	}
+
+	cam_soc_util_release_platform_resource(&csiphy_dev->soc_info);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
new file mode 100644
index 0000000..86b1f01
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/cam_csiphy_soc.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_SOC_H_
+#define _CAM_CSIPHY_SOC_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include "cam_csiphy_dev.h"
+#include "cam_csiphy_core.h"
+
+#undef CDBG
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+
+#define CSI_3PHASE_HW                               1
+#define CSIPHY_VERSION_V35                        0x35
+#define CSIPHY_VERSION_V10                        0x10
+#define CSIPHY_VERSION_V11                        0x11
+#define CSIPHY_VERSION_V12                        0x12
+#define CSIPHY_VERSION_V20                        0x20
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API releases SOC related parameters
+ */
+int cam_csiphy_soc_release(struct csiphy_device *csiphy_dev);
+
+/**
+ * @pdev: Platform device
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API parses csiphy device tree information
+ */
+int cam_csiphy_parse_dt_info(struct platform_device *pdev,
+	struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API enables SOC related parameters
+ */
+int cam_csiphy_enable_hw(struct csiphy_device *csiphy_dev);
+
+/**
+ * @csiphy_dev: CSIPhy device structure
+ *
+ * This API disables SOC related parameters
+ */
+int cam_csiphy_disable_hw(struct csiphy_device *csiphy_dev);
+
+/**
+ * @soc_info: Soc info of cam hw driver module
+ *
+ * This API dumps memory for the entire mapped region
+ * (needs to be macro enabled before use)
+ */
+int cam_csiphy_mem_dmp(struct cam_hw_soc_info *soc_info);
+
+#endif /* _CAM_CSIPHY_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
new file mode 100644
index 0000000..5e5bbc4
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_0_hwreg.h
@@ -0,0 +1,348 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_1_0_HWREG_H_
+#define _CAM_CSIPHY_1_0_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+struct csiphy_reg_parms_t csiphy_v1_0 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x858,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+	.csiphy_common_array_size = 5,
+	.csiphy_reset_array_size = 5,
+	.csiphy_2ph_config_array_size = 14,
+	.csiphy_3ph_config_array_size = 19,
+};
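+
+/*
+ * Note: csiphy_2ph_config_array_size above (14) matches the number of
+ * populated entries per lane row in csiphy_2ph_v1_0_reg, and
+ * csiphy_3ph_config_array_size (19) matches csiphy_3ph_v1_0_reg; the
+ * remaining MAX_SETTINGS_PER_LANE slots in each row stay zero-filled.
+ */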
+
+struct csiphy_reg_t csiphy_common_reg_1_0[] = {
+	{0x0814, 0x00, 0x00, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_reset_reg_1_0[] = {
+	{0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_irq_reg_1_0[] = {
+	{0x082c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0830, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0834, 0xFB, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0838, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x083c, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0840, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0844, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0848, 0xEF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x084c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0850, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0854, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_2ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t
+	csiphy_2ph_v1_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x0A, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x14, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x0A, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0x91, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x00, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t csiphy_3ph_v1_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t
+	csiphy_3ph_v1_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x63, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0xAC, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x12, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x51, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+#endif /* _CAM_CSIPHY_1_0_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h
new file mode 100644
index 0000000..cec83756
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_1_hwreg.h
@@ -0,0 +1,499 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_1_1_HWREG_H_
+#define _CAM_CSIPHY_1_1_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+struct csiphy_reg_parms_t csiphy_v1_1 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x858,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+	.csiphy_common_array_size = 5,
+	.csiphy_reset_array_size = 5,
+	.csiphy_2ph_config_array_size = 14,
+	.csiphy_3ph_config_array_size = 43,
+	.csiphy_2ph_clock_lane = 0x1,
+	.csiphy_2ph_combo_ck_ln = 0x10,
+};
+
+struct csiphy_reg_t csiphy_common_reg_1_1[] = {
+	{0x0814, 0xd5, 0x00, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_reset_reg_1_1[] = {
+	{0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x01, 0x01, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_irq_reg_1_1[] = {
+	{0x082c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0830, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0834, 0xFB, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0838, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x083c, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0840, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0844, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0848, 0xEF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x084c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0850, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0854, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct
+csiphy_reg_t csiphy_2ph_v1_1_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0x90, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t
+	csiphy_2ph_v1_1_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0x8D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0x80, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x0E, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0xA5, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x31, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct
+csiphy_reg_t csiphy_3ph_v1_1_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0144, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x098C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x098C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0188, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x018C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0190, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0980, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0984, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0988, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09AC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0344, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0388, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x038C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0390, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0544, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0588, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x058C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0590, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x24, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t
+	csiphy_3ph_v1_1_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0144, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x098C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x098C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0188, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x018C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0190, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0980, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x015C, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0984, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0988, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09AC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x02, 0x00, CSIPHY_DNP_PARAMS},
+	},
+	{
+		{0x0344, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0388, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x038C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0390, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x035C, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x02, 0x00, CSIPHY_DNP_PARAMS},
+	},
+	{
+		{0x0544, 0xB6, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0x5F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0x8E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0x59, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0588, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x058C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0590, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B80, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x055C, 0x48, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B88, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BAC, 0x55, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x2B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+#endif /* _CAM_CSIPHY_D5_0_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h
new file mode 100644
index 0000000..8df9aae
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_1_2_hwreg.h
@@ -0,0 +1,392 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_1_2_HWREG_H_
+#define _CAM_CSIPHY_1_2_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
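+/*
+ * Layout note (informal): each entry in the csiphy_reg_t tables in
+ * this file is {reg_addr, reg_data, delay, csiphy_param_type}. The
+ * param type (CSIPHY_DEFAULT_PARAMS, CSIPHY_SETTLE_CNT_LOWER/
+ * HIGHER_BYTE, CSIPHY_DNP_PARAMS, ...) is assumed to tell the
+ * programming code whether to write the value as-is, substitute the
+ * computed settle count, or skip the entry; see cam_csiphy_dev.h for
+ * the authoritative definition.
+ */
+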
+struct csiphy_reg_parms_t csiphy_v1_2 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x858,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+	.csiphy_common_array_size = 4,
+	.csiphy_reset_array_size = 4,
+	.csiphy_2ph_config_array_size = 21,
+	.csiphy_3ph_config_array_size = 31,
+	.csiphy_2ph_clock_lane = 0x1,
+	.csiphy_2ph_combo_ck_ln = 0x10,
+};
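+
+/*
+ * The *_array_size values above are presumably expected to match the
+ * entry counts of the corresponding tables in this file; keep them in
+ * sync when editing the tables.
+ */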
+
+struct csiphy_reg_t csiphy_common_reg_1_2[] = {
+	{0x0814, 0xd5, 0x00, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_reset_reg_1_2[] = {
+	{0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0800, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_irq_reg_1_2[] = {
+	{0x082c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0830, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0834, 0xFB, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0838, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x083c, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0840, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0844, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0848, 0xEF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x084c, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0850, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0854, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
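+/*
+ * One inner array below programs one PHY lane. From the register
+ * offsets alone (not a documented map): the data-lane banks sit at
+ * 0x200-aligned bases (0x0000/0x0200/0x0400/0x0600), the 0x07xx bank
+ * in the second row appears to be the clock lane, and the
+ * 0x09xx-0x0Cxx registers look like per-lane control/status mirrors.
+ */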
+struct csiphy_reg_t
+	csiphy_2ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0910, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0900, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0908, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x00C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C80, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C88, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x07C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070c, 0x16, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A00, 0x0B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A08, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x02C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B00, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B08, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x04C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C00, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C08, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x06C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t
+	csiphy_2ph_v1_2_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0030, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0904, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0910, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0900, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0908, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0904, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x00C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0014, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0004, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0730, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C84, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C80, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C88, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C84, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x07C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0714, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0704, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070c, 0x16, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x1F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0230, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A00, 0x0B, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A08, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x02C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0214, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0204, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0430, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B00, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B08, 0x1D, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x04C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0414, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0404, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040c, 0x00, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0630, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C04, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C10, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C00, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C08, 0x14, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0C04, 0x07, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x06C4, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x0F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0614, 0x60, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0xD4, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0604, 0x0C, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060c, 0x16, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x52, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0xFE, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t
+	csiphy_3ph_v1_2_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0990, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0994, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0998, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x098C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0150, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0188, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x018C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0190, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x09B0, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0A8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0350, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0388, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x038C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0390, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0AB0, 0x03, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B90, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B94, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B98, 0x1A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0B8C, 0xAF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0xA0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0x25, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0550, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0588, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x058C, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0590, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x30, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x33, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05DC, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0BB0, 0x05, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0800, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+#endif /* _CAM_CSIPHY_1_2_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_0_hwreg.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_0_hwreg.h
new file mode 100644
index 0000000..5a5bda1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_csiphy/include/cam_csiphy_2_0_hwreg.h
@@ -0,0 +1,289 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_CSIPHY_2_0_HWREG_H_
+#define _CAM_CSIPHY_2_0_HWREG_H_
+
+#include "../cam_csiphy_dev.h"
+
+struct csiphy_reg_parms_t csiphy_v2_0 = {
+	.mipi_csiphy_interrupt_status0_addr = 0x8B0,
+	.mipi_csiphy_interrupt_clear0_addr = 0x858,
+	.mipi_csiphy_glbl_irq_cmd_addr = 0x828,
+	.csiphy_common_array_size = 6,
+	.csiphy_reset_array_size = 3,
+	.csiphy_2ph_config_array_size = 15,
+	.csiphy_3ph_config_array_size = 17,
+	.csiphy_2ph_clock_lane = 0x1,
+	.csiphy_2ph_combo_ck_ln = 0x10,
+};
+
+struct csiphy_reg_t csiphy_common_reg_2_0[] = {
+	{0x0814, 0x00, 0x00, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x06, 0x00, CSIPHY_3PH_REGS},
+	{0x0164, 0x00, 0x00, CSIPHY_2PH_REGS},
+	{0x0364, 0x00, 0x00, CSIPHY_2PH_REGS},
+	{0x0564, 0x00, 0x00, CSIPHY_2PH_REGS},
+};
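+
+/*
+ * The CSIPHY_2PH_REGS / CSIPHY_3PH_REGS tags above presumably let one
+ * common table serve both PHY modes, with entries tagged for the
+ * other mode skipped when the common block is programmed.
+ */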
+
+struct csiphy_reg_t csiphy_reset_reg_2_0[] = {
+	{0x0814, 0x00, 0x05, CSIPHY_LANE_ENABLE},
+	{0x0818, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x081C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_irq_reg_2_0[] = {
+	{0x082c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0830, 0xff, 0x01, CSIPHY_DEFAULT_PARAMS},
+	{0x0834, 0xfb, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0838, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x083c, 0x7f, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0840, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0844, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0848, 0xef, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x084c, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0850, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+	{0x0854, 0xff, 0x00, CSIPHY_DEFAULT_PARAMS},
+};
+
+struct csiphy_reg_t csiphy_2ph_v2_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0030, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0004, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0730, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0704, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0230, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0204, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0430, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0404, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0630, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0604, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t
+	csiphy_2ph_v2_0_combo_mode_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x0030, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x002C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0034, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0028, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x003C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x001C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0000, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0004, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0020, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0008, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x000C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0010, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0038, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0060, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0064, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0730, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x072C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0734, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0728, 0x04, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x073C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x071C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0700, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0704, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0720, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0708, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x070C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0710, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0738, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0760, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0764, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0230, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x022C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0234, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0228, 0x04, 0x00, CSIPHY_DNP_PARAMS},
+		{0x023C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x021C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0200, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0204, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0220, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0208, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x020C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0210, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0238, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0260, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0264, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0430, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x042C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0434, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0428, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x043C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x041C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0400, 0xD7, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0404, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0420, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0408, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x040C, 0xFF, 0x00, CSIPHY_DNP_PARAMS},
+		{0x0410, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0438, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0460, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0464, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x0630, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x062C, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0634, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0628, 0x0E, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x063C, 0xB8, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x061C, 0x0A, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0600, 0xC0, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0604, 0x08, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0620, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0608, 0x04, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x060C, 0xFF, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0610, 0x50, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0638, 0x01, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0660, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0664, 0x3F, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+struct csiphy_reg_t csiphy_3ph_v2_0_reg[MAX_LANES][MAX_SETTINGS_PER_LANE] = {
+	{
+		{0x015C, 0x23, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0104, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x010C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0108, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0114, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0168, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x016C, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0118, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x011C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0120, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0124, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0128, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x012C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0144, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0160, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x01CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0164, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x035C, 0x23, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0304, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x030C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0308, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0314, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0368, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x036C, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0318, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x031C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0320, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0324, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0328, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x032C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0344, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0360, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x03CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0364, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+	{
+		{0x055C, 0x23, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0504, 0x06, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x050C, 0x12, 0x00, CSIPHY_SETTLE_CNT_LOWER_BYTE},
+		{0x0508, 0x00, 0x00, CSIPHY_SETTLE_CNT_HIGHER_BYTE},
+		{0x0514, 0x20, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0568, 0x70, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x056C, 0x17, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0518, 0x3e, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x051C, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0520, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0524, 0x7F, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0528, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x052C, 0x00, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0544, 0x32, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0560, 0x02, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x05CC, 0x41, 0x00, CSIPHY_DEFAULT_PARAMS},
+		{0x0564, 0x40, 0x00, CSIPHY_DEFAULT_PARAMS},
+	},
+};
+
+#endif /* _CAM_CSIPHY_2_0_HWREG_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
new file mode 100644
index 0000000..b966477
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_eeprom_dev.o cam_eeprom_core.o cam_eeprom_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
new file mode 100644
index 0000000..5448b2c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.c
@@ -0,0 +1,938 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <media/cam_sensor.h>
+
+#include "cam_eeprom_core.h"
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+
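+/*
+ * Informal sketch of one cam_eeprom_memory_map_t entry as consumed by
+ * cam_eeprom_read_memory() below (the authoritative definition lives
+ * in the eeprom headers): an optional slave-address override (saddr),
+ * an optional page-select write (page), an optional page-enable write
+ * (pageen), an optional readiness poll (poll), and the payload region
+ * to read (mem). Each sub-field is acted on only when its valid_size
+ * is non-zero.
+ */
+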
+/**
+ * cam_eeprom_read_memory() - read map data into buffer
+ * @e_ctrl:     eeprom control struct
+ * @block:      block to be read
+ *
+ * This function iterates through blocks stored in block->map, reads each
+ * region and concatenate them into the pre-allocated block->mapdata
+ */
+static int cam_eeprom_read_memory(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_memory_block_t *block)
+{
+	int                                rc = 0;
+	int                                j;
+	struct cam_sensor_i2c_reg_setting  i2c_reg_settings = {0};
+	struct cam_sensor_i2c_reg_array    i2c_reg_array = {0};
+	struct cam_eeprom_memory_map_t    *emap = block->map;
+	struct cam_eeprom_soc_private     *eb_info = NULL;
+	uint8_t                           *memptr = block->mapdata;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	eb_info = (struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+
+	for (j = 0; j < block->num_map; j++) {
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X", emap[j].saddr);
+		if (emap[j].saddr) {
+			eb_info->i2c_info.slave_addr = emap[j].saddr;
+			rc = cam_eeprom_update_i2c_info(e_ctrl,
+				&eb_info->i2c_info);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"failed to update i2c info, rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		if (emap[j].page.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].page.addr_type;
+			i2c_reg_settings.data_type = emap[j].page.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].page.addr;
+			i2c_reg_array.reg_data = emap[j].page.data;
+			i2c_reg_array.delay = emap[j].page.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page write failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = emap[j].pageen.data;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "page enable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		if (emap[j].poll.valid_size) {
+			rc = camera_io_dev_poll(&e_ctrl->io_master_info,
+				emap[j].poll.addr, emap[j].poll.data,
+				0, emap[j].poll.addr_type,
+				emap[j].poll.data_type,
+				emap[j].poll.delay);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "poll failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+
+		if (emap[j].mem.valid_size) {
+			rc = camera_io_dev_read_seq(&e_ctrl->io_master_info,
+				emap[j].mem.addr, memptr,
+				emap[j].mem.addr_type,
+				emap[j].mem.data_type,
+				emap[j].mem.valid_size);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM, "read failed rc %d",
+					rc);
+				return rc;
+			}
+			memptr += emap[j].mem.valid_size;
+		}
+
+		if (emap[j].pageen.valid_size) {
+			i2c_reg_settings.addr_type = emap[j].pageen.addr_type;
+			i2c_reg_settings.data_type = emap[j].pageen.data_type;
+			i2c_reg_settings.size = 1;
+			i2c_reg_array.reg_addr = emap[j].pageen.addr;
+			i2c_reg_array.reg_data = 0;
+			i2c_reg_array.delay = emap[j].pageen.delay;
+			i2c_reg_settings.reg_setting = &i2c_reg_array;
+			rc = camera_io_dev_write(&e_ctrl->io_master_info,
+				&i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_EEPROM,
+					"page disable failed rc %d",
+					rc);
+				return rc;
+			}
+		}
+	}
+	return rc;
+}
+
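+/*
+ * Note (inferred from the call order below, not from documentation):
+ * power-up fills the vreg params for both the power-up and the
+ * power-down sequences before touching hardware, so a later call to
+ * cam_eeprom_power_down() has usable power_down_setting data even if
+ * power-up fails partway through.
+ */
+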
+/**
+ * cam_eeprom_power_up - Power up eeprom hardware
+ * @e_ctrl:     ctrl structure
+ * @power_info: power up/down info for eeprom
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_up(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t                 rc = 0;
+	struct cam_hw_soc_info *soc_info =
+		&e_ctrl->soc_info;
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill power up vreg params rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		&e_ctrl->soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"failed to fill power down vreg params rc:%d", rc);
+		return rc;
+	}
+
+	power_info->dev = soc_info->dev;
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed in eeprom power up rc %d", rc);
+		return rc;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = camera_io_init(&(e_ctrl->io_master_info));
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "cci_init failed");
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_power_down - Power down eeprom hardware
+ * @e_ctrl:    ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_power_down(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info         *soc_info;
+	struct cam_eeprom_soc_private  *soc_private;
+	int                             rc = 0;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl %pK", e_ctrl);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+	soc_info = &e_ctrl->soc_info;
+
+	if (!power_info) {
+		CAM_ERR(CAM_EEPROM, "failed: power_info %pK", power_info);
+		return -EINVAL;
+	}
+	rc = cam_sensor_util_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "core power down failed, rc: %d", rc);
+		return rc;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER)
+		camera_io_release(&(e_ctrl->io_master_info));
+
+	return rc;
+}
+
+/**
+ * cam_eeprom_match_id - match eeprom id
+ * @e_ctrl:     ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_eeprom_match_id(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int                      rc;
+	struct camera_io_master *client = &e_ctrl->io_master_info;
+	uint8_t                  id[2];
+
+	rc = cam_spi_query_id(client, 0, CAMERA_SENSOR_I2C_TYPE_WORD,
+		&id[0], 2);
+	if (rc)
+		return rc;
+	CAM_DBG(CAM_EEPROM, "read 0x%x 0x%x, check 0x%x 0x%x",
+		id[0], id[1], client->spi_client->mfr_id0,
+		client->spi_client->device_id0);
+	if (id[0] != client->spi_client->mfr_id0
+		|| id[1] != client->spi_client->device_id0)
+		return -ENODEV;
+	return 0;
+}
+
+/**
+ * cam_eeprom_parse_read_memory_map - Parse memory map
+ * @of_node:    device node
+ * @e_ctrl:     ctrl structure
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+	struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int32_t                         rc = 0;
+	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "failed: e_ctrl is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	rc = cam_eeprom_parse_dt_memory_map(of_node, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom dt parse rc %d", rc);
+		return rc;
+	}
+	rc = cam_eeprom_power_up(e_ctrl, power_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power up rc %d", rc);
+		goto data_mem_free;
+	}
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
+	if (e_ctrl->eeprom_device_type == MSM_CAMERA_SPI_DEVICE) {
+		rc = cam_eeprom_match_id(e_ctrl);
+		if (rc) {
+			CAM_DBG(CAM_EEPROM, "eeprom not matching %d", rc);
+			goto power_down;
+		}
+	}
+	rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "cam_eeprom_read_memory failed");
+		goto power_down;
+	}
+
+	rc = cam_eeprom_power_down(e_ctrl);
+	if (rc)
+		CAM_ERR(CAM_EEPROM, "failed: eeprom power down rc %d", rc);
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+	return rc;
+power_down:
+	cam_eeprom_power_down(e_ctrl);
+data_mem_free:
+	vfree(e_ctrl->cal_data.mapdata);
+	vfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_dev_handle - get device handle
+ * @e_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_dev_handle(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *arg)
+{
+	struct cam_sensor_acquire_dev    eeprom_acq_dev;
+	struct cam_create_dev_hdl        bridge_params;
+	struct cam_control              *cmd = (struct cam_control *)arg;
+
+	if (e_ctrl->bridge_intf.device_hdl != -1) {
+		CAM_ERR(CAM_EEPROM, "Device is already acquired");
+		return -EFAULT;
+	}
+	if (copy_from_user(&eeprom_acq_dev,
+		u64_to_user_ptr(cmd->handle),
+		sizeof(eeprom_acq_dev))) {
+		CAM_ERR(CAM_EEPROM,
+			"EEPROM:ACQUIRE_DEV: copy from user failed");
+		return -EFAULT;
+	}
+
+	bridge_params.session_hdl = eeprom_acq_dev.session_handle;
+	bridge_params.ops = &e_ctrl->bridge_intf.ops;
+	bridge_params.v4l2_sub_dev_flag = 0;
+	bridge_params.media_entity_flag = 0;
+	bridge_params.priv = e_ctrl;
+
+	eeprom_acq_dev.device_handle =
+		cam_create_device_hdl(&bridge_params);
+	e_ctrl->bridge_intf.device_hdl = eeprom_acq_dev.device_handle;
+	e_ctrl->bridge_intf.session_hdl = eeprom_acq_dev.session_handle;
+
+	CAM_DBG(CAM_EEPROM, "Device Handle: %d", eeprom_acq_dev.device_handle);
+	if (copy_to_user(u64_to_user_ptr(cmd->handle),
+		&eeprom_acq_dev, sizeof(struct cam_sensor_acquire_dev))) {
+		CAM_ERR(CAM_EEPROM, "EEPROM:ACQUIRE_DEV: copy to user failed");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * cam_eeprom_update_slaveInfo - Update slave info
+ * @e_ctrl:     ctrl structure
+ * @cmd_buf:    command buffer
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_update_slaveInfo(struct cam_eeprom_ctrl_t *e_ctrl,
+	void *cmd_buf)
+{
+	int32_t                         rc = 0;
+	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_cmd_i2c_info        *cmd_i2c_info = NULL;
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	cmd_i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+	soc_private->i2c_info.slave_addr = cmd_i2c_info->slave_addr;
+	soc_private->i2c_info.i2c_freq_mode = cmd_i2c_info->i2c_freq_mode;
+
+	rc = cam_eeprom_update_i2c_info(e_ctrl,
+		&soc_private->i2c_info);
+	CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x Freq Mode: %d",
+		soc_private->i2c_info.slave_addr,
+		soc_private->i2c_info.i2c_freq_mode);
+
+	return rc;
+}
+
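+/*
+ * The parser below reports the number of command-buffer bytes it
+ * consumed through @cmd_length_bytes; the caller advances its buffer
+ * cursor by exactly that amount, so every handled command type must
+ * account for its full length.
+ */
+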
+/**
+ * cam_eeprom_parse_memory_map - Parse memory map info
+ * @data:             memory block data
+ * @cmd_buf:          command buffer
+ * @cmd_length:       command buffer length
+ * @num_map:          memory map size
+ * @cmd_length_bytes: command length processed in this function
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_parse_memory_map(
+	struct cam_eeprom_memory_block_t *data,
+	void *cmd_buf, int cmd_length, uint16_t *cmd_length_bytes,
+	int *num_map)
+{
+	int32_t                            rc = 0;
+	int32_t                            cnt = 0;
+	int32_t                            processed_size = 0;
+	uint8_t                            generic_op_code;
+	struct cam_eeprom_memory_map_t    *map = data->map;
+	struct common_header              *cmm_hdr =
+		(struct common_header *)cmd_buf;
+	uint16_t                           cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_random_wr      *i2c_random_wr = NULL;
+	struct cam_cmd_i2c_continuous_rd  *i2c_cont_rd = NULL;
+	struct cam_cmd_conditional_wait   *i2c_poll = NULL;
+	struct cam_cmd_unconditional_wait *i2c_uncond_wait = NULL;
+
+	generic_op_code = cmm_hdr->third_byte;
+	switch (cmm_hdr->cmd_type) {
+	case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+		i2c_random_wr = (struct cam_cmd_i2c_random_wr *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_random_wr) +
+			((i2c_random_wr->header.count - 1) *
+			sizeof(struct i2c_random_wr_payload));
+
+		for (cnt = 0; cnt < (i2c_random_wr->header.count);
+			cnt++) {
+			map[*num_map + cnt].page.addr =
+				i2c_random_wr->random_wr_payload[cnt].reg_addr;
+			map[*num_map + cnt].page.addr_type =
+				i2c_random_wr->header.addr_type;
+			map[*num_map + cnt].page.data =
+				i2c_random_wr->random_wr_payload[cnt].reg_data;
+			map[*num_map + cnt].page.data_type =
+				i2c_random_wr->header.data_type;
+			map[*num_map + cnt].page.valid_size = 1;
+		}
+
+		*num_map += (i2c_random_wr->header.count - 1);
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size += cmd_length_in_bytes;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+		i2c_cont_rd = (struct cam_cmd_i2c_continuous_rd *)cmd_buf;
+		cmd_length_in_bytes = sizeof(struct cam_cmd_i2c_continuous_rd);
+
+		map[*num_map].mem.addr = i2c_cont_rd->reg_addr;
+		map[*num_map].mem.addr_type = i2c_cont_rd->header.addr_type;
+		map[*num_map].mem.data_type = i2c_cont_rd->header.data_type;
+		map[*num_map].mem.valid_size =
+			i2c_cont_rd->header.count;
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size += cmd_length_in_bytes;
+		data->num_data += map[*num_map].mem.valid_size;
+		break;
+	case CAMERA_SENSOR_CMD_TYPE_WAIT:
+		if (generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_HW_UCND ||
+			generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+			i2c_uncond_wait =
+				(struct cam_cmd_unconditional_wait *)cmd_buf;
+			cmd_length_in_bytes =
+				sizeof(struct cam_cmd_unconditional_wait);
+
+			if (*num_map < 1) {
+				CAM_ERR(CAM_EEPROM,
+					"invalid map number, num_map=%d",
+					*num_map);
+				return -EINVAL;
+			}
+
+			/*
+			 * The delay is written into all three sub-entries,
+			 * but it takes effect for only one of them, since
+			 * only one will have valid_size >= 1.
+			 */
+			map[*num_map - 1].mem.delay = i2c_uncond_wait->delay;
+			map[*num_map - 1].page.delay = i2c_uncond_wait->delay;
+			map[*num_map - 1].pageen.delay = i2c_uncond_wait->delay;
+		} else if (generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_COND) {
+			i2c_poll = (struct cam_cmd_conditional_wait *)cmd_buf;
+			cmd_length_in_bytes =
+				sizeof(struct cam_cmd_conditional_wait);
+
+			map[*num_map].poll.addr = i2c_poll->reg_addr;
+			map[*num_map].poll.addr_type = i2c_poll->addr_type;
+			map[*num_map].poll.data = i2c_poll->reg_data;
+			map[*num_map].poll.data_type = i2c_poll->data_type;
+			map[*num_map].poll.delay = i2c_poll->timeout;
+			map[*num_map].poll.valid_size = 1;
+		}
+		cmd_buf += cmd_length_in_bytes / sizeof(int32_t);
+		processed_size += cmd_length_in_bytes;
+		break;
+	default:
+		break;
+	}
+
+	*cmd_length_bytes = processed_size;
+	return rc;
+}
+
+/**
+ * cam_eeprom_init_pkt_parser - Parse eeprom packet
+ * @e_ctrl:       ctrl structure
+ * @csl_packet:	  csl packet received
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_init_pkt_parser(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	int32_t                         rc = 0;
+	int                             i = 0;
+	struct cam_cmd_buf_desc        *cmd_desc = NULL;
+	uint32_t                       *offset = NULL;
+	uint32_t                       *cmd_buf = NULL;
+	uintptr_t                        generic_pkt_addr;
+	size_t                          pkt_len = 0;
+	uint32_t                        total_cmd_buf_in_bytes = 0;
+	uint32_t                        processed_cmd_buf_in_bytes = 0;
+	struct common_header           *cmm_hdr = NULL;
+	uint16_t                        cmd_length_in_bytes = 0;
+	struct cam_cmd_i2c_info        *i2c_info = NULL;
+	int                             num_map = -1;
+	struct cam_eeprom_memory_map_t *map = NULL;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	e_ctrl->cal_data.map = vzalloc((MSM_EEPROM_MEMORY_MAP_MAX_SIZE *
+		MSM_EEPROM_MAX_MEM_MAP_CNT) *
+		(sizeof(struct cam_eeprom_memory_map_t)));
+	if (!e_ctrl->cal_data.map) {
+		rc = -ENOMEM;
+		CAM_ERR(CAM_EEPROM, "failed to allocate memory map");
+		return rc;
+	}
+	map = e_ctrl->cal_data.map;
+
+	offset = (uint32_t *)&csl_packet->payload;
+	offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+	cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+	/* Loop through multiple command buffers */
+	for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+		total_cmd_buf_in_bytes = cmd_desc[i].length;
+		processed_cmd_buf_in_bytes = 0;
+		if (!total_cmd_buf_in_bytes)
+			continue;
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			&generic_pkt_addr, &pkt_len);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to get cpu buf");
+			return rc;
+		}
+		cmd_buf = (uint32_t *)generic_pkt_addr;
+		if (!cmd_buf) {
+			CAM_ERR(CAM_EEPROM, "invalid cmd buf");
+			return -EINVAL;
+		}
+		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+		/* Loop through multiple cmd formats in one cmd buffer */
+		while (processed_cmd_buf_in_bytes < total_cmd_buf_in_bytes) {
+			cmm_hdr = (struct common_header *)cmd_buf;
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+				/* Configure the following map slave address */
+				map[num_map + 1].saddr = i2c_info->slave_addr;
+				rc = cam_eeprom_update_slaveInfo(e_ctrl,
+					cmd_buf);
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+					sizeof(uint32_t);
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
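+				/*
+				 * Power sequences are parsed in one shot:
+				 * treat the whole command buffer as consumed.
+				 */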
+				cmd_length_in_bytes = total_cmd_buf_in_bytes;
+				rc = cam_sensor_update_power_settings(cmd_buf,
+					cmd_length_in_bytes, power_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+					sizeof(uint32_t);
+				if (rc) {
+					CAM_ERR(CAM_EEPROM,
+						"power settings parse failed");
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR:
+			case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD:
+			case CAMERA_SENSOR_CMD_TYPE_WAIT:
+				num_map++;
+				rc = cam_eeprom_parse_memory_map(
+					&e_ctrl->cal_data, cmd_buf,
+					total_cmd_buf_in_bytes,
+					&cmd_length_in_bytes, &num_map);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/sizeof(uint32_t);
+				break;
+			default:
+				break;
+			}
+		}
+		e_ctrl->cal_data.num_map = num_map + 1;
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_get_cal_data - parse the userspace IO config and copy the
+ *                           read data to share with userspace
+ * @e_ctrl:     ctrl structure
+ * @csl_packet: csl packet received
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_get_cal_data(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_packet *csl_packet)
+{
+	struct cam_buf_io_cfg *io_cfg;
+	uint32_t              i = 0;
+	int                   rc = 0;
+	uintptr_t              buf_addr;
+	size_t                buf_size;
+	uint8_t               *read_buffer;
+
+	io_cfg = (struct cam_buf_io_cfg *) ((uint8_t *)
+		&csl_packet->payload +
+		csl_packet->io_configs_offset);
+
+	CAM_DBG(CAM_EEPROM, "number of IO configs: %d",
+		csl_packet->num_io_configs);
+
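+	/* Only CAM_BUF_OUTPUT configs receive the EEPROM dump */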
+	for (i = 0; i < csl_packet->num_io_configs; i++) {
+		CAM_DBG(CAM_EEPROM, "Direction: %d", io_cfg->direction);
+		if (io_cfg->direction == CAM_BUF_OUTPUT) {
+			rc = cam_mem_get_cpu_buf(io_cfg->mem_handle[0],
+				&buf_addr, &buf_size);
+			CAM_DBG(CAM_EEPROM, "buf_addr : %pK, buf_size : %zu\n",
+				(void *)buf_addr, buf_size);
+
+			read_buffer = (uint8_t *)buf_addr;
+			if (!read_buffer) {
+				CAM_ERR(CAM_EEPROM,
+					"invalid buffer to copy data");
+				return -EINVAL;
+			}
+			read_buffer += io_cfg->offsets[0];
+
+			if (buf_size < e_ctrl->cal_data.num_data) {
+				CAM_ERR(CAM_EEPROM,
+					"failed to copy, Invalid size");
+				return -EINVAL;
+			}
+
+			CAM_DBG(CAM_EEPROM, "copy the data, len:%d",
+				e_ctrl->cal_data.num_data);
+			memcpy(read_buffer, e_ctrl->cal_data.mapdata,
+					e_ctrl->cal_data.num_data);
+
+		} else {
+			CAM_ERR(CAM_EEPROM, "Invalid direction");
+			rc = -EINVAL;
+		}
+	}
+	return rc;
+}
+
+/**
+ * cam_eeprom_pkt_parse - Parse csl packet
+ * @e_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int32_t cam_eeprom_pkt_parse(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int32_t                         rc = 0;
+	struct cam_control             *ioctl_ctrl = NULL;
+	struct cam_config_dev_cmd       dev_config;
+	uintptr_t                        generic_pkt_addr;
+	size_t                          pkt_len;
+	struct cam_packet              *csl_packet = NULL;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (copy_from_user(&dev_config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(dev_config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
+		&generic_pkt_addr, &pkt_len);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM,
+			"error in converting command Handle Error: %d", rc);
+		return rc;
+	}
+
+	if (dev_config.offset > pkt_len) {
+		CAM_ERR(CAM_EEPROM,
+			"Offset is out of bound: off: %lld, %zu",
+			dev_config.offset, pkt_len);
+		return -EINVAL;
+	}
+
+	csl_packet = (struct cam_packet *)
+		(generic_pkt_addr + (uint32_t)dev_config.offset);
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_EEPROM_PACKET_OPCODE_INIT:
+		if (e_ctrl->userspace_probe == false) {
+			rc = cam_eeprom_parse_read_memory_map(
+					e_ctrl->soc_info.dev->of_node, e_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_EEPROM, "Failed: rc : %d", rc);
+				return rc;
+			}
+			rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+			vfree(e_ctrl->cal_data.mapdata);
+			vfree(e_ctrl->cal_data.map);
+			e_ctrl->cal_data.num_data = 0;
+			e_ctrl->cal_data.num_map = 0;
+			CAM_DBG(CAM_EEPROM,
+				"Returning the data using kernel probe");
+			break;
+		}
+		rc = cam_eeprom_init_pkt_parser(e_ctrl, csl_packet);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in parsing the pkt");
+			return rc;
+		}
+
+		e_ctrl->cal_data.mapdata =
+			vzalloc(e_ctrl->cal_data.num_data);
+		if (!e_ctrl->cal_data.mapdata) {
+			rc = -ENOMEM;
+			CAM_ERR(CAM_EEPROM, "failed to allocate mapdata");
+			goto error;
+		}
+
+		rc = cam_eeprom_power_up(e_ctrl,
+			&soc_private->power_info);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "failed rc %d", rc);
+			goto memdata_free;
+		}
+
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_CONFIG;
+		rc = cam_eeprom_read_memory(e_ctrl, &e_ctrl->cal_data);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM,
+				"read_eeprom_memory failed");
+			goto power_down;
+		}
+
+		rc = cam_eeprom_get_cal_data(e_ctrl, csl_packet);
+		rc = cam_eeprom_power_down(e_ctrl);
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+		vfree(e_ctrl->cal_data.mapdata);
+		vfree(e_ctrl->cal_data.map);
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
+		power_info->power_setting_size = 0;
+		power_info->power_down_setting_size = 0;
+		e_ctrl->cal_data.num_data = 0;
+		e_ctrl->cal_data.num_map = 0;
+		break;
+	default:
+		break;
+	}
+	return rc;
+power_down:
+	cam_eeprom_power_down(e_ctrl);
+memdata_free:
+	vfree(e_ctrl->cal_data.mapdata);
+error:
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	vfree(e_ctrl->cal_data.map);
+	e_ctrl->cal_data.num_data = 0;
+	e_ctrl->cal_data.num_map = 0;
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+	return rc;
+}
+
+void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_INIT)
+		return;
+
+	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_CONFIG) {
+		rc = cam_eeprom_power_down(e_ctrl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM, "EEPROM Power down failed");
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+	}
+
+	if (e_ctrl->cam_eeprom_state == CAM_EEPROM_ACQUIRE) {
+		rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM, "destroying the device hdl");
+
+		e_ctrl->bridge_intf.device_hdl = -1;
+		e_ctrl->bridge_intf.link_hdl = -1;
+		e_ctrl->bridge_intf.session_hdl = -1;
+
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
+		power_info->power_setting_size = 0;
+		power_info->power_down_setting_size = 0;
+	}
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+}
+
+/**
+ * cam_eeprom_driver_cmd - Handle eeprom cmds
+ * @e_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg)
+{
+	int                            rc = 0;
+	struct cam_eeprom_query_cap_t  eeprom_cap = {0};
+	struct cam_control            *cmd = (struct cam_control *)arg;
+
+	if (!e_ctrl || !cmd) {
+		CAM_ERR(CAM_EEPROM, "Invalid Arguments");
+		return -EINVAL;
+	}
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_EEPROM, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	mutex_lock(&(e_ctrl->eeprom_mutex));
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP:
+		eeprom_cap.slot_info = e_ctrl->soc_info.index;
+		if (e_ctrl->userspace_probe == false)
+			eeprom_cap.eeprom_kernel_probe = true;
+		else
+			eeprom_cap.eeprom_kernel_probe = false;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&eeprom_cap,
+			sizeof(struct cam_eeprom_query_cap_t))) {
+			CAM_ERR(CAM_EEPROM, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		CAM_DBG(CAM_EEPROM, "eeprom_cap: ID: %d", eeprom_cap.slot_info);
+		break;
+	case CAM_ACQUIRE_DEV:
+		rc = cam_eeprom_get_dev_handle(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed to acquire dev");
+			goto release_mutex;
+		}
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_ACQUIRE;
+		break;
+	case CAM_RELEASE_DEV:
+		if (e_ctrl->cam_eeprom_state != CAM_EEPROM_ACQUIRE) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_EEPROM,
+			"Not in right state to release : %d",
+			e_ctrl->cam_eeprom_state);
+			goto release_mutex;
+		}
+
+		if (e_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_EEPROM,
+				"Invalid Handles: link hdl: %d device hdl: %d",
+				e_ctrl->bridge_intf.device_hdl,
+				e_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(e_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_EEPROM,
+				"failed in destroying the device hdl");
+		e_ctrl->bridge_intf.device_hdl = -1;
+		e_ctrl->bridge_intf.link_hdl = -1;
+		e_ctrl->bridge_intf.session_hdl = -1;
+		e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+		break;
+	case CAM_CONFIG_DEV:
+		rc = cam_eeprom_pkt_parse(e_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_EEPROM, "Failed in eeprom pkt Parsing");
+			goto release_mutex;
+		}
+		break;
+	default:
+		CAM_DBG(CAM_EEPROM, "invalid opcode");
+		break;
+	}
+
+release_mutex:
+	mutex_unlock(&(e_ctrl->eeprom_mutex));
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
new file mode 100644
index 0000000..5786065
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_core.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_EEPROM_CORE_H_
+#define _CAM_EEPROM_CORE_H_
+
+#include "cam_eeprom_dev.h"
+
+int32_t cam_eeprom_driver_cmd(struct cam_eeprom_ctrl_t *e_ctrl, void *arg);
+int32_t cam_eeprom_parse_read_memory_map(struct device_node *of_node,
+	struct cam_eeprom_ctrl_t *e_ctrl);
+/**
+ * @e_ctrl: EEPROM ctrl structure
+ *
+ * This API handles the shutdown ioctl/close
+ */
+void cam_eeprom_shutdown(struct cam_eeprom_ctrl_t *e_ctrl);
+
+#endif /* _CAM_EEPROM_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
new file mode 100644
index 0000000..ad14f6e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_eeprom_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_eeprom_soc.h"
+#include "cam_eeprom_core.h"
+#include "cam_debug_util.h"
+
+static long cam_eeprom_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int                       rc     = 0;
+	struct cam_eeprom_ctrl_t *e_ctrl = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_eeprom_driver_cmd(e_ctrl, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_eeprom_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_eeprom_ctrl_t *e_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "e_ctrl ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(e_ctrl->eeprom_mutex));
+	cam_eeprom_shutdown(e_ctrl);
+	mutex_unlock(&(e_ctrl->eeprom_mutex));
+
+	return 0;
+}
+
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_i2c_info_t *i2c_info)
+{
+	struct cam_sensor_cci_client        *cci_client = NULL;
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		cci_client = e_ctrl->io_master_info.cci_client;
+		if (!cci_client) {
+			CAM_ERR(CAM_EEPROM, "failed: cci_client %pK",
+				cci_client);
+			return -EINVAL;
+		}
+		cci_client->cci_i2c_master = e_ctrl->cci_i2c_master;
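+		/* slave_addr holds the 8-bit form; CCI wants the 7-bit address */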
+		cci_client->sid = (i2c_info->slave_addr) >> 1;
+		cci_client->retries = 3;
+		cci_client->id_map = 0;
+		cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+	} else if (e_ctrl->io_master_info.master_type == I2C_MASTER) {
+		e_ctrl->io_master_info.client->addr = i2c_info->slave_addr;
+		CAM_DBG(CAM_EEPROM, "Slave addr: 0x%x", i2c_info->slave_addr);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_eeprom_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_EEPROM,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_eeprom_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed in eeprom subdev handling rc %d",
+				rc);
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_EEPROM, "Invalid compat ioctl: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_EEPROM,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops cam_eeprom_internal_ops = {
+	.close = cam_eeprom_subdev_close,
+};
+
+static struct v4l2_subdev_core_ops cam_eeprom_subdev_core_ops = {
+	.ioctl = cam_eeprom_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_eeprom_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_eeprom_subdev_ops = {
+	.core = &cam_eeprom_subdev_core_ops,
+};
+
+static int cam_eeprom_init_subdev(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int rc = 0;
+
+	e_ctrl->v4l2_dev_str.internal_ops = &cam_eeprom_internal_ops;
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	strlcpy(e_ctrl->device_name, CAM_EEPROM_NAME,
+		sizeof(e_ctrl->device_name));
+	e_ctrl->v4l2_dev_str.name = e_ctrl->device_name;
+	e_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	e_ctrl->v4l2_dev_str.ent_function = CAM_EEPROM_DEVICE_TYPE;
+	e_ctrl->v4l2_dev_str.token = e_ctrl;
+
+	rc = cam_register_subdev(&(e_ctrl->v4l2_dev_str));
+	if (rc)
+		CAM_ERR(CAM_EEPROM, "Failed with cam_register_subdev");
+
+	return rc;
+}
+
+static int cam_eeprom_i2c_driver_probe(struct i2c_client *client,
+	 const struct i2c_device_id *id)
+{
+	int                             rc = 0;
+	struct cam_eeprom_ctrl_t       *e_ctrl = NULL;
+	struct cam_eeprom_soc_private  *soc_private = NULL;
+	struct cam_hw_soc_info         *soc_info = NULL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_EEPROM, "i2c_check_functionality failed");
+		goto probe_failure;
+	}
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "kzalloc failed");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	soc_private = kzalloc(sizeof(*soc_private), GFP_KERNEL);
+	if (!soc_private)
+		goto ectrl_free;
+
+	e_ctrl->soc_info.soc_private = soc_private;
+
+	i2c_set_clientdata(client, e_ctrl);
+
+	mutex_init(&(e_ctrl->eeprom_mutex));
+
+	soc_info = &e_ctrl->soc_info;
+	soc_info->dev = &client->dev;
+	soc_info->dev_name = client->name;
+	e_ctrl->io_master_info.master_type = I2C_MASTER;
+	e_ctrl->io_master_info.client = client;
+	e_ctrl->eeprom_device_type = MSM_CAMERA_I2C_DEVICE;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	rc = cam_eeprom_parse_dt(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_eeprom_init_subdev(e_ctrl);
+	if (rc)
+		goto free_soc;
+
+	if (soc_private->i2c_info.slave_addr != 0)
+		e_ctrl->io_master_info.client->addr =
+			soc_private->i2c_info.slave_addr;
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+
+	return rc;
+free_soc:
+	kfree(soc_private);
+ectrl_free:
+	kfree(e_ctrl);
+probe_failure:
+	return rc;
+}
+
+static int cam_eeprom_i2c_driver_remove(struct i2c_client *client)
+{
+	int                             i;
+	struct v4l2_subdev             *sd = i2c_get_clientdata(client);
+	struct cam_eeprom_ctrl_t       *e_ctrl;
+	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_hw_soc_info         *soc_info;
+
+	if (!sd) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_EEPROM, "soc_info.soc_private is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &e_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	kfree(soc_private);
+	kfree(e_ctrl->io_master_info.cci_client);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+static int cam_eeprom_spi_setup(struct spi_device *spi)
+{
+	struct cam_eeprom_ctrl_t       *e_ctrl = NULL;
+	struct cam_hw_soc_info         *soc_info = NULL;
+	struct cam_sensor_spi_client   *spi_client;
+	struct cam_eeprom_soc_private  *eb_info;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+	int                             rc = 0;
+
+	e_ctrl = kzalloc(sizeof(*e_ctrl), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	soc_info = &e_ctrl->soc_info;
+	soc_info->dev = &spi->dev;
+	soc_info->dev_name = spi->modalias;
+
+	e_ctrl->v4l2_dev_str.ops = &cam_eeprom_subdev_ops;
+	e_ctrl->userspace_probe = false;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+
+	spi_client = kzalloc(sizeof(*spi_client), GFP_KERNEL);
+	if (!spi_client) {
+		kfree(e_ctrl);
+		return -ENOMEM;
+	}
+
+	eb_info = kzalloc(sizeof(*eb_info), GFP_KERNEL);
+	if (!eb_info)
+		goto spi_free;
+	e_ctrl->soc_info.soc_private = eb_info;
+
+	e_ctrl->eeprom_device_type = MSM_CAMERA_SPI_DEVICE;
+	e_ctrl->io_master_info.spi_client = spi_client;
+	e_ctrl->io_master_info.master_type = SPI_MASTER;
+	spi_client->spi_master = spi;
+
+	power_info = &eb_info->power_info;
+	power_info->dev = &spi->dev;
+
+	/* set spi instruction info */
+	spi_client->retry_delay = 1;
+	spi_client->retries = 0;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+
+	rc = cam_eeprom_parse_dt(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: spi soc init rc %d", rc);
+		goto board_free;
+	}
+
+	rc = cam_eeprom_spi_parse_of(spi_client);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Device tree parsing error");
+		goto board_free;
+	}
+
+	rc = cam_eeprom_init_subdev(e_ctrl);
+	if (rc)
+		goto board_free;
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+	return rc;
+
+board_free:
+	kfree(e_ctrl->soc_info.soc_private);
+spi_free:
+	kfree(spi_client);
+	kfree(e_ctrl);
+	return rc;
+}
+
+static int cam_eeprom_spi_driver_probe(struct spi_device *spi)
+{
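+	/* EEPROM SPI devices here are driven in mode 0 with 8-bit words */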
+	spi->bits_per_word = 8;
+	spi->mode = SPI_MODE_0;
+	spi_setup(spi);
+
+	CAM_DBG(CAM_EEPROM, "irq[%d] cs[%x] CPHA[%x] CPOL[%x] CS_HIGH[%x]",
+		spi->irq, spi->chip_select, (spi->mode & SPI_CPHA) ? 1 : 0,
+		(spi->mode & SPI_CPOL) ? 1 : 0,
+		(spi->mode & SPI_CS_HIGH) ? 1 : 0);
+	CAM_DBG(CAM_EEPROM, "max_speed[%u]", spi->max_speed_hz);
+
+	return cam_eeprom_spi_setup(spi);
+}
+
+static int cam_eeprom_spi_driver_remove(struct spi_device *sdev)
+{
+	int                             i;
+	struct v4l2_subdev             *sd = spi_get_drvdata(sdev);
+	struct cam_eeprom_ctrl_t       *e_ctrl;
+	struct cam_eeprom_soc_private  *soc_private;
+	struct cam_hw_soc_info         *soc_info;
+
+	if (!sd) {
+		CAM_ERR(CAM_EEPROM, "Subdevice is NULL");
+		return -EINVAL;
+	}
+
+	e_ctrl = (struct cam_eeprom_ctrl_t *)v4l2_get_subdevdata(sd);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &e_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	kfree(e_ctrl->io_master_info.spi_client);
+	soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	if (soc_private) {
+		kfree(soc_private->power_info.gpio_num_info);
+		kfree(soc_private);
+	}
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
+	kfree(e_ctrl);
+
+	return 0;
+}
+
+static int32_t cam_eeprom_platform_driver_probe(
+	struct platform_device *pdev)
+{
+	int32_t                         rc = 0;
+	struct cam_eeprom_ctrl_t       *e_ctrl = NULL;
+	struct cam_eeprom_soc_private  *soc_private = NULL;
+
+	e_ctrl = kzalloc(sizeof(struct cam_eeprom_ctrl_t), GFP_KERNEL);
+	if (!e_ctrl)
+		return -ENOMEM;
+
+	e_ctrl->soc_info.pdev = pdev;
+	e_ctrl->soc_info.dev = &pdev->dev;
+	e_ctrl->soc_info.dev_name = pdev->name;
+	e_ctrl->eeprom_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+	e_ctrl->cal_data.mapdata = NULL;
+	e_ctrl->cal_data.map = NULL;
+	e_ctrl->userspace_probe = false;
+
+	e_ctrl->io_master_info.master_type = CCI_MASTER;
+	e_ctrl->io_master_info.cci_client = kzalloc(
+		sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!e_ctrl->io_master_info.cci_client) {
+		rc = -ENOMEM;
+		goto free_e_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_eeprom_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	e_ctrl->soc_info.soc_private = soc_private;
+	soc_private->power_info.dev = &pdev->dev;
+
+	/* Initialize mutex */
+	mutex_init(&(e_ctrl->eeprom_mutex));
+	rc = cam_eeprom_parse_dt(e_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+	rc = cam_eeprom_update_i2c_info(e_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "failed: to update i2c info rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_eeprom_init_subdev(e_ctrl);
+	if (rc)
+		goto free_soc;
+
+	e_ctrl->bridge_intf.device_hdl = -1;
+	e_ctrl->bridge_intf.ops.get_dev_info = NULL;
+	e_ctrl->bridge_intf.ops.link_setup = NULL;
+	e_ctrl->bridge_intf.ops.apply_req = NULL;
+
+	platform_set_drvdata(pdev, e_ctrl);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, e_ctrl);
+
+	e_ctrl->cam_eeprom_state = CAM_EEPROM_INIT;
+
+	return rc;
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(e_ctrl->io_master_info.cci_client);
+free_e_ctrl:
+	kfree(e_ctrl);
+	return rc;
+}
+
+static int cam_eeprom_platform_driver_remove(struct platform_device *pdev)
+{
+	int                        i;
+	struct cam_eeprom_ctrl_t  *e_ctrl;
+	struct cam_hw_soc_info    *soc_info;
+
+	e_ctrl = platform_get_drvdata(pdev);
+	if (!e_ctrl) {
+		CAM_ERR(CAM_EEPROM, "eeprom device is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &e_ctrl->soc_info;
+
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	mutex_destroy(&(e_ctrl->eeprom_mutex));
+	kfree(soc_info->soc_private);
+	kfree(e_ctrl->io_master_info.cci_client);
+	platform_set_drvdata(pdev, NULL);
+	v4l2_set_subdevdata(&e_ctrl->v4l2_dev_str.sd, NULL);
+	kfree(e_ctrl);
+	return 0;
+}
+
+static const struct of_device_id cam_eeprom_dt_match[] = {
+	{ .compatible = "qcom,eeprom" },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(of, cam_eeprom_dt_match);
+
+static struct platform_driver cam_eeprom_platform_driver = {
+	.driver = {
+		.name = "qcom,eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = cam_eeprom_platform_driver_probe,
+	.remove = cam_eeprom_platform_driver_remove,
+};
+
+static const struct i2c_device_id cam_eeprom_i2c_id[] = {
+	{ "msm_eeprom", (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_eeprom_i2c_driver = {
+	.id_table = cam_eeprom_i2c_id,
+	.probe  = cam_eeprom_i2c_driver_probe,
+	.remove = cam_eeprom_i2c_driver_remove,
+	.driver = {
+		.name = "msm_eeprom",
+	},
+};
+
+static struct spi_driver cam_eeprom_spi_driver = {
+	.driver = {
+		.name = "qcom_eeprom",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_eeprom_dt_match,
+	},
+	.probe = cam_eeprom_spi_driver_probe,
+	.remove = cam_eeprom_spi_driver_remove,
+};
+
+static int __init cam_eeprom_driver_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&cam_eeprom_platform_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "platform_driver_register failed rc = %d",
+			rc);
+		return rc;
+	}
+
+	rc = spi_register_driver(&cam_eeprom_spi_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "spi_register_driver failed rc = %d", rc);
+		platform_driver_unregister(&cam_eeprom_platform_driver);
+		return rc;
+	}
+
+	rc = i2c_add_driver(&cam_eeprom_i2c_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "i2c_add_driver failed rc = %d", rc);
+		spi_unregister_driver(&cam_eeprom_spi_driver);
+		platform_driver_unregister(&cam_eeprom_platform_driver);
+		return rc;
+	}
+
+	return rc;
+}
+
+static void __exit cam_eeprom_driver_exit(void)
+{
+	platform_driver_unregister(&cam_eeprom_platform_driver);
+	spi_unregister_driver(&cam_eeprom_spi_driver);
+	i2c_del_driver(&cam_eeprom_i2c_driver);
+}
+
+module_init(cam_eeprom_driver_init);
+module_exit(cam_eeprom_driver_exit);
+MODULE_DESCRIPTION("CAM EEPROM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
new file mode 100644
index 0000000..72b0a5b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_dev.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_EEPROM_DEV_H_
+#define _CAM_EEPROM_DEV_H_
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_sensor.h>
+#include <cam_sensor_i2c.h>
+#include <cam_sensor_spi.h>
+#include <cam_sensor_io.h>
+#include <cam_cci_dev.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include <cam_subdev.h>
+#include "cam_soc_util.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+#define PROPERTY_MAXSIZE 32
+
+#define MSM_EEPROM_MEMORY_MAP_MAX_SIZE         80
+#define MSM_EEPROM_MAX_MEM_MAP_CNT             8
+#define MSM_EEPROM_MEM_MAP_PROPERTIES_CNT      8
+
+enum cam_eeprom_state {
+	CAM_EEPROM_INIT,
+	CAM_EEPROM_ACQUIRE,
+	CAM_EEPROM_CONFIG,
+};
+
+/**
+ * struct cam_eeprom_map_t - eeprom map
+ * @valid_size      :   Number of valid entries
+ * @addr            :   Address
+ * @addr_type       :   Address type
+ * @data            :   data
+ * @data_type       :   Data type
+ * @delay           :   Delay
+ *
+ */
+struct cam_eeprom_map_t {
+	uint32_t valid_size;
+	uint32_t addr;
+	uint32_t addr_type;
+	uint32_t data;
+	uint32_t data_type;
+	uint32_t delay;
+};
+
+/**
+ * struct cam_eeprom_memory_map_t - eeprom memory map types
+ * @page            :   page memory
+ * @pageen          :   pageen memory
+ * @poll            :   poll memory
+ * @mem             :   mem
+ * @saddr           :   slave addr
+ *
+ */
+struct cam_eeprom_memory_map_t {
+	struct cam_eeprom_map_t page;
+	struct cam_eeprom_map_t pageen;
+	struct cam_eeprom_map_t poll;
+	struct cam_eeprom_map_t mem;
+	uint32_t saddr;
+};
+
+/**
+ * struct cam_eeprom_memory_block_t - eeprom mem block info
+ * @map             :   eeprom memory map
+ * @num_map         :   number of map blocks
+ * @mapdata         :   map data
+ * @num_data        :   size of total mapdata
+ *
+ */
+struct cam_eeprom_memory_block_t {
+	struct cam_eeprom_memory_map_t *map;
+	uint32_t num_map;
+	uint8_t *mapdata;
+	uint32_t num_data;
+};
+
+/**
+ * struct cam_eeprom_cmm_t - camera multimodule
+ * @cmm_support     :   cmm support flag
+ * @cmm_compression :   cmm compression flag
+ * @cmm_offset      :   cmm data start offset
+ * @cmm_size        :   cmm data size
+ *
+ */
+struct cam_eeprom_cmm_t {
+	uint32_t cmm_support;
+	uint32_t cmm_compression;
+	uint32_t cmm_offset;
+	uint32_t cmm_size;
+};
+
+/**
+ * struct cam_eeprom_i2c_info_t - I2C info
+ * @slave_addr      :   slave address
+ * @i2c_freq_mode   :   i2c frequency mode
+ *
+ */
+struct cam_eeprom_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
+/**
+ * struct cam_eeprom_soc_private - eeprom soc private data structure
+ * @eeprom_name     :   eeprom name
+ * @i2c_info        :   i2c info structure
+ * @power_info      :   eeprom power info
+ * @cmm_data        :   cmm data
+ *
+ */
+struct cam_eeprom_soc_private {
+	const char *eeprom_name;
+	struct cam_eeprom_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
+	struct cam_eeprom_cmm_t cmm_data;
+};
+
+/**
+ * struct cam_eeprom_intf_params - bridge interface params
+ * @device_hdl   : Device Handle
+ * @session_hdl  : Session Handle
+ * @ops          : KMD operations
+ * @crm_cb       : Callback API pointers
+ */
+struct cam_eeprom_intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_eeprom_ctrl_t - EEPROM device control structure
+ * @pdev            :   platform device
+ * @spi             :   spi device
+ * @eeprom_mutex    :   eeprom mutex
+ * @soc_info        :   eeprom soc related info
+ * @io_master_info  :   Information about the communication master
+ * @gpio_num_info   :   gpio info
+ * @cci_i2c_master  :   CCI I2C master to be used
+ * @cci_num         :   CCI device instance
+ * @v4l2_dev_str    :   V4L2 device structure
+ * @bridge_intf     :   bridge interface params
+ * @eeprom_device_type: eeprom device type (platform/i2c/spi)
+ * @cam_eeprom_state:   eeprom device state
+ * @userspace_probe :   flag indicating userspace or kernel probe
+ * @cal_data        :   Calibration data
+ * @device_name     :   Device name
+ *
+ */
+struct cam_eeprom_ctrl_t {
+	struct platform_device *pdev;
+	struct spi_device *spi;
+	struct mutex eeprom_mutex;
+	struct cam_hw_soc_info soc_info;
+	struct camera_io_master io_master_info;
+	struct msm_camera_gpio_num_info *gpio_num_info;
+	enum cci_i2c_master_t cci_i2c_master;
+	enum cci_device_num cci_num;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_eeprom_intf_params bridge_intf;
+	enum msm_camera_device_type_t eeprom_device_type;
+	enum cam_eeprom_state cam_eeprom_state;
+	bool userspace_probe;
+	struct cam_eeprom_memory_block_t cal_data;
+	char device_name[20];
+};
+
+int32_t cam_eeprom_update_i2c_info(struct cam_eeprom_ctrl_t *e_ctrl,
+	struct cam_eeprom_i2c_info_t *i2c_info);
+
+#endif /* _CAM_EEPROM_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
new file mode 100644
index 0000000..522fc0d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+#include "cam_eeprom_soc.h"
+#include "cam_debug_util.h"
+
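+/*
+ * Each spiop-* DT property is a 5-tuple:
+ *   <opcode addr_len dummy_len delay_intv delay_count>
+ * e.g. spiop-read = <0x03 3 0 0 0>; (illustrative values only)
+ */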
+#define cam_eeprom_spi_parse_cmd(spi_dev, name, out)          \
+	{                                                     \
+		spi_dev->cmd_tbl.name.opcode = out[0];        \
+		spi_dev->cmd_tbl.name.addr_len = out[1];      \
+		spi_dev->cmd_tbl.name.dummy_len = out[2];     \
+		spi_dev->cmd_tbl.name.delay_intv = out[3];    \
+		spi_dev->cmd_tbl.name.delay_count = out[4];   \
+	}
+
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *spi_dev)
+{
+	int rc = -EFAULT;
+	uint32_t tmp[5];
+
+	rc  = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-read", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get read data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-readseq", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read_seq, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get readseq data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-queryid", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, query_id, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get queryid data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-pprog", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, page_program, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get page program data");
+		return -EFAULT;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-wenable", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, write_enable, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get write enable data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-readst", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, read_status, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get read status data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"spiop-erase", tmp, 5);
+	if (!rc) {
+		cam_eeprom_spi_parse_cmd(spi_dev, erase, tmp);
+	} else {
+		CAM_ERR(CAM_EEPROM, "Failed to get erase data");
+		return rc;
+	}
+
+	rc = of_property_read_u32_array(spi_dev->spi_master->dev.of_node,
+		"eeprom-id", tmp, 2);
+	if (rc) {
+		CAM_ERR(CAM_EEPROM, "Failed to get eeprom id");
+		return rc;
+	}
+
+	spi_dev->mfr_id0 = tmp[0];
+	spi_dev->device_id0 = tmp[1];
+
+	return 0;
+}
+
+/*
+ * cam_eeprom_parse_dt_memory_map() - parse memory map in device node
+ * @node:       device node
+ * @data:       memory block for output
+ *
+ * This function parses @node to fill @data. It allocates the map itself,
+ * calculates the total data length, and allocates the required buffer.
+ * It only fills the map; it does not perform the actual read.
+ */
+int cam_eeprom_parse_dt_memory_map(struct device_node *node,
+	struct cam_eeprom_memory_block_t *data)
+{
+	int       i, rc = 0;
+	char      property[PROPERTY_MAXSIZE];
+	uint32_t  count = MSM_EEPROM_MEM_MAP_PROPERTIES_CNT;
+	struct    cam_eeprom_memory_map_t *map;
+
+	snprintf(property, PROPERTY_MAXSIZE, "num-blocks");
+	rc = of_property_read_u32(node, property, &data->num_map);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed: num-blocks not available rc %d",
+			rc);
+		return rc;
+	}
+
+	map = vzalloc((sizeof(*map) * data->num_map));
+	if (!map) {
+		rc = -ENOMEM;
+		return rc;
+	}
+	data->map = map;
+
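+	/*
+	 * Each block i is described by the DT properties page<i>,
+	 * pageen<i> (optional), saddr<i> (optional), poll<i> and mem<i>.
+	 */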
+	for (i = 0; i < data->num_map; i++) {
+		snprintf(property, PROPERTY_MAXSIZE, "page%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].page, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: page not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "pageen%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].pageen, count);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "pageen not needed");
+
+		snprintf(property, PROPERTY_MAXSIZE, "saddr%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].saddr, 1);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "saddr not needed - block %d", i);
+
+		snprintf(property, PROPERTY_MAXSIZE, "poll%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].poll, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: poll not available rc %d",
+				rc);
+			goto ERROR;
+		}
+
+		snprintf(property, PROPERTY_MAXSIZE, "mem%d", i);
+		rc = of_property_read_u32_array(node, property,
+			(uint32_t *) &map[i].mem, count);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed: mem not available rc %d",
+				rc);
+			goto ERROR;
+		}
+		data->num_data += map[i].mem.valid_size;
+	}
+
+	data->mapdata = vzalloc(data->num_data);
+	if (!data->mapdata) {
+		rc = -ENOMEM;
+		goto ERROR;
+	}
+	return rc;
+
+ERROR:
+	vfree(data->map);
+	memset(data, 0, sizeof(*data));
+	return rc;
+}
+
+/**
+ * @e_ctrl: ctrl structure
+ *
+ * Parses eeprom dt
+ */
+static int cam_eeprom_get_dt_data(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int                             rc = 0;
+	struct cam_hw_soc_info         *soc_info = &e_ctrl->soc_info;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+	struct device_node             *of_node = NULL;
+
+	of_node = soc_info->dev->of_node;
+
+	if (e_ctrl->userspace_probe == false) {
+		rc = cam_get_dt_power_setting_data(of_node,
+			soc_info, power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM, "failed in getting power settings");
+			return rc;
+		}
+	}
+
+	if (!soc_info->gpio_data) {
+		CAM_INFO(CAM_EEPROM, "No GPIO found");
+		return 0;
+	}
+
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		CAM_INFO(CAM_EEPROM, "No GPIO found");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
+		CAM_ERR(CAM_EEPROM, "No/Error EEPROM GPIOs");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * @eb_info: eeprom private data structure
+ * @of_node: eeprom device node
+ *
+ * This function parses the eeprom dt to get the MM data
+ */
+static int cam_eeprom_cmm_dts(struct cam_eeprom_soc_private *eb_info,
+	struct device_node *of_node)
+{
+	int                      rc = 0;
+	struct cam_eeprom_cmm_t *cmm_data = &eb_info->cmm_data;
+
+	cmm_data->cmm_support =
+		of_property_read_bool(of_node, "cmm-data-support");
+	if (!cmm_data->cmm_support) {
+		CAM_DBG(CAM_EEPROM, "No cmm support");
+		return 0;
+	}
+
+	cmm_data->cmm_compression =
+		of_property_read_bool(of_node, "cmm-data-compressed");
+
+	rc = of_property_read_u32(of_node, "cmm-data-offset",
+		&cmm_data->cmm_offset);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM offset data rc %d", rc);
+
+	rc = of_property_read_u32(of_node, "cmm-data-size",
+		&cmm_data->cmm_size);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "No MM size data rc %d", rc);
+
+	CAM_DBG(CAM_EEPROM, "cmm_compr %d, cmm_offset %d, cmm_size %d",
+		cmm_data->cmm_compression, cmm_data->cmm_offset,
+		cmm_data->cmm_size);
+	return 0;
+}
+
+/**
+ * @e_ctrl: ctrl structure
+ *
+ * This function is called from cam_eeprom_platform/i2c/spi_driver_probe
+ * it parses the eeprom dt node and decides for userspace or kernel probe.
+ */
+int cam_eeprom_parse_dt(struct cam_eeprom_ctrl_t *e_ctrl)
+{
+	int                             i, rc = 0;
+	struct cam_hw_soc_info         *soc_info = &e_ctrl->soc_info;
+	struct device_node             *of_node = NULL;
+	struct cam_eeprom_soc_private  *soc_private =
+		(struct cam_eeprom_soc_private *)e_ctrl->soc_info.soc_private;
+	uint32_t                        temp;
+
+	if (!soc_info->dev) {
+		CAM_ERR(CAM_EEPROM, "Dev is NULL");
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "Failed to read DT properties rc : %d", rc);
+		return rc;
+	}
+
+	of_node = soc_info->dev->of_node;
+
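+	/* Absence of eeprom-name means the probe is driven from userspace */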
+	rc = of_property_read_string(of_node, "eeprom-name",
+		&soc_private->eeprom_name);
+	if (rc < 0) {
+		CAM_DBG(CAM_EEPROM, "kernel probe is not enabled");
+		e_ctrl->userspace_probe = true;
+	}
+
+	if (e_ctrl->io_master_info.master_type == CCI_MASTER) {
+		rc = of_property_read_u32(of_node, "cci-master",
+			&e_ctrl->cci_i2c_master);
+		if (rc < 0 || (e_ctrl->cci_i2c_master >= MASTER_MAX)) {
+			CAM_DBG(CAM_EEPROM, "failed to read cci-master rc %d", rc);
+			rc = -EFAULT;
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node, "cci-device",
+			&e_ctrl->cci_num);
+		CAM_DBG(CAM_EEPROM, "cci-device %d, rc %d",
+			e_ctrl->cci_num, rc);
+		if (rc < 0) {
+			/* Default to CCI device 0 */
+			e_ctrl->cci_num = CCI_DEVICE_0;
+			rc = 0;
+		}
+		e_ctrl->io_master_info.cci_client->cci_device = e_ctrl->cci_num;
+	}
+
+	if (e_ctrl->io_master_info.master_type == SPI_MASTER) {
+		rc = cam_eeprom_cmm_dts(soc_private, soc_info->dev->of_node);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "MM data not available rc %d", rc);
+	}
+
+	rc = cam_eeprom_get_dt_data(e_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_EEPROM, "failed: eeprom get dt data rc %d", rc);
+
+	if ((e_ctrl->userspace_probe == false) &&
+			(e_ctrl->io_master_info.master_type != SPI_MASTER)) {
+		rc = of_property_read_u32(of_node, "slave-addr", &temp);
+		if (rc < 0)
+			CAM_DBG(CAM_EEPROM, "failed: no slave-addr rc %d", rc);
+
+		soc_private->i2c_info.slave_addr = temp;
+
+		rc = of_property_read_u32(of_node, "i2c-freq-mode", &temp);
+		soc_private->i2c_info.i2c_freq_mode = temp;
+		if (rc < 0) {
+			CAM_ERR(CAM_EEPROM,
+				"i2c-freq-mode read fail %d", rc);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		if (soc_private->i2c_info.i2c_freq_mode >= I2C_MAX_MODES) {
+			CAM_ERR(CAM_EEPROM, "invalid i2c_freq_mode = %d",
+				soc_private->i2c_info.i2c_freq_mode);
+			soc_private->i2c_info.i2c_freq_mode = 0;
+		}
+		CAM_DBG(CAM_EEPROM, "slave-addr = 0x%X",
+			soc_private->i2c_info.slave_addr);
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+			CAM_ERR(CAM_EEPROM, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			return rc;
+		}
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
new file mode 100644
index 0000000..804d20e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_eeprom/cam_eeprom_soc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_EEPROM_SOC_H_
+#define _CAM_EEPROM_SOC_H_
+
+#include "cam_eeprom_dev.h"
+
+int cam_eeprom_spi_parse_of(struct cam_sensor_spi_client *client);
+
+int cam_eeprom_parse_dt_memory_map(struct device_node *of,
+	struct cam_eeprom_memory_block_t *data);
+
+int cam_eeprom_parse_dt(struct cam_eeprom_ctrl_t *e_ctrl);
+#endif /* _CAM_EEPROM_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
new file mode 100644
index 0000000..d5e95f2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_flash_dev.o cam_flash_core.o cam_flash_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
new file mode 100644
index 0000000..34371d0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -0,0 +1,1652 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_flash_core.h"
+#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
+
+static int cam_flash_prepare(struct cam_flash_ctrl *flash_ctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+	struct cam_flash_private_soc *soc_private =
+		(struct cam_flash_private_soc *)
+		flash_ctrl->soc_info.soc_private;
+
+	if (!(flash_ctrl->switch_trigger)) {
+		CAM_ERR(CAM_FLASH, "Invalid argument");
+		return -EINVAL;
+	}
+
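+	/* WLED-backed and PMIC-backed flash use different prepare helpers */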
+	if (soc_private->is_wled_flash) {
+		if (regulator_enable &&
+			flash_ctrl->is_regulator_enabled == false) {
+			rc = wled_flash_led_prepare(flash_ctrl->switch_trigger,
+				ENABLE_REGULATOR, NULL);
+			if (rc) {
+				CAM_ERR(CAM_FLASH, "enable reg failed: rc: %d",
+					rc);
+				return rc;
+			}
+
+			flash_ctrl->is_regulator_enabled = true;
+		} else if (!regulator_enable &&
+				flash_ctrl->is_regulator_enabled == true) {
+			rc = wled_flash_led_prepare(flash_ctrl->switch_trigger,
+				DISABLE_REGULATOR, NULL);
+			if (rc) {
+				CAM_ERR(CAM_FLASH, "disable reg failed: rc: %d",
+					rc);
+				return rc;
+			}
+
+			flash_ctrl->is_regulator_enabled = false;
+		} else {
+			CAM_ERR(CAM_FLASH, "Wrong Wled flash state: %d",
+				flash_ctrl->flash_state);
+			rc = -EINVAL;
+		}
+	} else {
+		if (regulator_enable &&
+			(flash_ctrl->is_regulator_enabled == false)) {
+			rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+				ENABLE_REGULATOR, NULL);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+					"Regulator enable failed rc = %d", rc);
+				return rc;
+			}
+
+			flash_ctrl->is_regulator_enabled = true;
+		} else if ((!regulator_enable) &&
+			(flash_ctrl->is_regulator_enabled == true)) {
+			rc = qpnp_flash_led_prepare(flash_ctrl->switch_trigger,
+				DISABLE_REGULATOR, NULL);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+					"Regulator disable failed rc = %d", rc);
+				return rc;
+			}
+
+			flash_ctrl->is_regulator_enabled = false;
+		} else {
+			CAM_ERR(CAM_FLASH, "Wrong Flash State : %d",
+				flash_ctrl->flash_state);
+			rc = -EINVAL;
+		}
+	}
+	return rc;
+}
+
+static int cam_flash_pmic_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int j = 0;
+	struct cam_flash_frame_setting *nrt_settings;
+
+	if (!fctrl)
+		return -EINVAL;
+
+	nrt_settings = &fctrl->nrt_info;
+
+	if (nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO) {
+		fctrl->flash_init_setting.cmn_attr.is_settings_valid = false;
+	} else if ((nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) ||
+		(nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_RER) ||
+		(nrt_settings->cmn_attr.cmd_type ==
+		CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE)) {
+		fctrl->nrt_info.cmn_attr.is_settings_valid = false;
+		fctrl->nrt_info.cmn_attr.count = 0;
+		fctrl->nrt_info.num_iterations = 0;
+		fctrl->nrt_info.led_on_delay_ms = 0;
+		fctrl->nrt_info.led_off_delay_ms = 0;
+		for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+			fctrl->nrt_info.led_current_ma[j] = 0;
+	}
+
+	return 0;
+}
+
+static int cam_flash_i2c_flush_nrt(struct cam_flash_ctrl *fctrl)
+{
+	int rc = 0;
+
+	if (fctrl->i2c_data.init_settings.is_settings_valid == true) {
+		rc = delete_request(&fctrl->i2c_data.init_settings);
+		if (rc) {
+			CAM_WARN(CAM_FLASH,
+				"Failed to delete Init i2c_setting: %d",
+				rc);
+			return rc;
+		}
+	}
+	if (fctrl->i2c_data.config_settings.is_settings_valid == true) {
+		rc = delete_request(&fctrl->i2c_data.config_settings);
+		if (rc) {
+			CAM_WARN(CAM_FLASH,
+				"Failed to delete NRT i2c_setting: %d",
+				rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int cam_flash_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+	power_info->power_setting[0].seq_val = CAM_V_CUSTOM1;
+	power_info->power_setting[0].config_val = 0;
+	power_info->power_setting[0].delay = 2;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_CUSTOM_REG1;
+	power_info->power_down_setting[0].seq_val = CAM_V_CUSTOM1;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+
+	if (!(fctrl->switch_trigger)) {
+		CAM_ERR(CAM_FLASH, "Invalid argument");
+		return -EINVAL;
+	}
+
+	if (regulator_enable) {
+		rc = cam_flash_prepare(fctrl, true);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Enable Regulator Failed rc = %d", rc);
+			return rc;
+		}
+		fctrl->last_flush_req = 0;
+	}
+
+	if (!regulator_enable) {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_START) &&
+			(fctrl->is_regulator_enabled == true)) {
+			rc = cam_flash_prepare(fctrl, false);
+			if (rc)
+				CAM_ERR(CAM_FLASH,
+					"Disable Regulator Failed rc: %d", rc);
+		}
+	}
+
+	return rc;
+}
+
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable)
+{
+	int rc = 0;
+	struct cam_hw_soc_info *soc_info = &fctrl->soc_info;
+	struct cam_sensor_power_ctrl_t *power_info =
+		&fctrl->power_info;
+
+	if (!power_info || !soc_info) {
+		CAM_ERR(CAM_FLASH, "Power Info is NULL");
+		return -EINVAL;
+	}
+	power_info->dev = soc_info->dev;
+
+	if (regulator_enable && (fctrl->is_regulator_enabled == false)) {
+		if ((power_info->power_setting == NULL) &&
+			(power_info->power_down_setting == NULL)) {
+			CAM_INFO(CAM_FLASH,
+				"Using default power settings");
+			rc = cam_flash_construct_default_power_setting(
+					power_info);
+			if (rc < 0) {
+				CAM_ERR(CAM_FLASH,
+				"Construct default pwr setting failed rc: %d",
+				rc);
+				return rc;
+			}
+		}
+
+		rc = cam_sensor_core_power_up(power_info, soc_info);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "power up the core failed: %d",
+				rc);
+			goto free_pwr_settings;
+		}
+
+		rc = camera_io_init(&(fctrl->io_master_info));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "cci_init failed: rc: %d", rc);
+			cam_sensor_util_power_down(power_info, soc_info);
+			goto free_pwr_settings;
+		}
+		fctrl->is_regulator_enabled = true;
+	} else if ((!regulator_enable) &&
+		(fctrl->is_regulator_enabled == true)) {
+		rc = cam_sensor_util_power_down(power_info, soc_info);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "power down the core failed: %d",
+				rc);
+			return rc;
+		}
+		camera_io_release(&(fctrl->io_master_info));
+		fctrl->is_regulator_enabled = false;
+		goto free_pwr_settings;
+	}
+	return rc;
+
+free_pwr_settings:
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_setting_size = 0;
+	power_info->power_down_setting_size = 0;
+
+	return rc;
+}
+
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id)
+{
+	int rc = 0;
+	int i = 0, j = 0;
+	int frame_offset = 0;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (type == FLUSH_ALL) {
+		cam_flash_off(fctrl);
+	/* flush all requests*/
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			fctrl->per_frame[i].cmn_attr.request_id = 0;
+			fctrl->per_frame[i].cmn_attr.is_settings_valid = false;
+			fctrl->per_frame[i].cmn_attr.count = 0;
+			for (j = 0; j < CAM_FLASH_MAX_LED_TRIGGERS; j++)
+				fctrl->per_frame[i].led_current_ma[j] = 0;
+		}
+
+		cam_flash_pmic_flush_nrt(fctrl);
+	} else if ((type == FLUSH_REQ) && (req_id != 0)) {
+		/* flush the per-frame entry at req_id's ring-buffer slot */
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+		fctrl->per_frame[frame_offset].cmn_attr.request_id = 0;
+		fctrl->per_frame[frame_offset].cmn_attr.is_settings_valid =
+			false;
+		fctrl->per_frame[frame_offset].cmn_attr.count = 0;
+		for (i = 0; i < CAM_FLASH_MAX_LED_TRIGGERS; i++)
+			fctrl->per_frame[frame_offset].led_current_ma[i] = 0;
+	} else if ((type == FLUSH_REQ) && (req_id == 0)) {
+		/* Handles the NonRealTime usecase */
+		cam_flash_pmic_flush_nrt(fctrl);
+	} else {
+		CAM_ERR(CAM_FLASH, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id)
+{
+	int rc = 0;
+	int i = 0;
+	uint32_t cancel_req_id_found = 0;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+	if ((type == FLUSH_REQ) && (req_id == 0)) {
+		/* This path is taken only when the NonRealTime
+		 * settings need to be cleaned.
+		 */
+		cam_flash_i2c_flush_nrt(fctrl);
+	} else {
+		/* All other use cases are handled here */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			i2c_set = &(fctrl->i2c_data.per_frame[i]);
+
+			if ((type == FLUSH_REQ) &&
+				(i2c_set->request_id != req_id))
+				continue;
+
+			if (i2c_set->is_settings_valid == 1) {
+				rc = delete_request(i2c_set);
+				if (rc < 0)
+					CAM_ERR(CAM_FLASH,
+						"delete request: %lld rc: %d",
+						i2c_set->request_id, rc);
+
+				if (type == FLUSH_REQ) {
+					cancel_req_id_found = 1;
+					break;
+				}
+			}
+		}
+	}
+
+	if ((type == FLUSH_REQ) && (req_id != 0) &&
+			(!cancel_req_id_found))
+		CAM_DBG(CAM_FLASH,
+			"Flush request id:%lld not found in the pending list",
+			req_id);
+
+	return rc;
+}
+
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(flush->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&fctrl->flash_mutex);
+	if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		fctrl->last_flush_req = flush->req_id;
+		CAM_DBG(CAM_FLASH, "last reqest to flush is %lld",
+			flush->req_id);
+		rc = fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "FLUSH_TYPE_ALL failed rc: %d", rc);
+			goto end;
+		}
+	} else if (flush->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+		rc = fctrl->func_tbl.flush_req(fctrl,
+				FLUSH_REQ, flush->req_id);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "FLUSH_REQ failed rc: %d", rc);
+			goto end;
+		}
+	}
+end:
+	mutex_unlock(&fctrl->flash_mutex);
+	return rc;
+}
+
+static int cam_flash_ops(struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data, enum camera_flash_opcode op)
+{
+	uint32_t curr = 0, max_current = 0;
+	struct cam_flash_private_soc *soc_private = NULL;
+	int i = 0;
+
+	if (!flash_ctrl || !flash_data) {
+		CAM_ERR(CAM_FLASH, "Fctrl or Data NULL");
+		return -EINVAL;
+	}
+
+	soc_private = (struct cam_flash_private_soc *)
+		flash_ctrl->soc_info.soc_private;
+
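+	/*
+	 * Clamp each requested LED current to the per-source maximum
+	 * read from DT before forwarding it to the LED trigger.
+	 */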
+	if (op == CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+		for (i = 0; i < flash_ctrl->torch_num_sources; i++) {
+			if (flash_ctrl->torch_trigger[i]) {
+				max_current = soc_private->torch_max_current[i];
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = max_current;
+			}
+			CAM_DBG(CAM_FLASH, "Led_Torch[%d]: Current: %d",
+				i, curr);
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->torch_trigger[i], curr);
+		}
+	} else if (op == CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+		for (i = 0; i < flash_ctrl->flash_num_sources; i++) {
+			if (flash_ctrl->flash_trigger[i]) {
+				max_current = soc_private->flash_max_current[i];
+				if (flash_data->led_current_ma[i] <=
+					max_current)
+					curr = flash_data->led_current_ma[i];
+				else
+					curr = max_current;
+			}
+			CAM_DBG(CAM_FLASH, "LED_Flash[%d]: Current: %d",
+				i, curr);
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->flash_trigger[i], curr);
+		}
+	} else {
+		CAM_ERR(CAM_FLASH, "Wrong Operation: %d", op);
+		return -EINVAL;
+	}
+
+	if (flash_ctrl->switch_trigger)
+		cam_res_mgr_led_trigger_event(
+			flash_ctrl->switch_trigger,
+			(enum led_brightness)LED_SWITCH_ON);
+
+	return 0;
+}
+
+int cam_flash_off(struct cam_flash_ctrl *flash_ctrl)
+{
+	if (!flash_ctrl) {
+		CAM_ERR(CAM_FLASH, "Flash control Null");
+		return -EINVAL;
+	}
+
+	if (flash_ctrl->switch_trigger)
+		cam_res_mgr_led_trigger_event(flash_ctrl->switch_trigger,
+			(enum led_brightness)LED_SWITCH_OFF);
+
+	flash_ctrl->flash_state = CAM_FLASH_STATE_START;
+	return 0;
+}
+
+static int cam_flash_low(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->flash_num_sources; i++)
+		if (flash_ctrl->flash_trigger[i])
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->flash_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIRELOW);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Torch failed: %d", rc);
+
+	return rc;
+}
+
+static int cam_flash_high(
+	struct cam_flash_ctrl *flash_ctrl,
+	struct cam_flash_frame_setting *flash_data)
+{
+	int i = 0, rc = 0;
+
+	if (!flash_data) {
+		CAM_ERR(CAM_FLASH, "Flash Data Null");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < flash_ctrl->torch_num_sources; i++)
+		if (flash_ctrl->torch_trigger[i])
+			cam_res_mgr_led_trigger_event(
+				flash_ctrl->torch_trigger[i],
+				LED_OFF);
+
+	rc = cam_flash_ops(flash_ctrl, flash_data,
+		CAMERA_SENSOR_FLASH_OP_FIREHIGH);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fire Flash Failed: %d", rc);
+
+	return rc;
+}
+
+static int cam_flash_i2c_delete_req(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	int i = 0, rc = 0;
+	uint64_t top = 0, del_req_id = 0;
+
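+	/*
+	 * Scan the per-frame ring for the newest valid request at or
+	 * below req_id (top); del_req_id ends up as the request that
+	 * top superseded, i.e. the newest entry safe to flush.
+	 */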
+	if (req_id != 0) {
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			if ((req_id >=
+				fctrl->i2c_data.per_frame[i].request_id) &&
+				(top <
+				fctrl->i2c_data.per_frame[i].request_id) &&
+				(fctrl->i2c_data.per_frame[i].is_settings_valid
+					== 1)) {
+				del_req_id = top;
+				top = fctrl->i2c_data.per_frame[i].request_id;
+			}
+		}
+
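+		/*
+		 * If the slot distance between top and req_id is at least
+		 * BATCH_SIZE_MAX, the two ids cannot share a batch window,
+		 * so fall back to flushing req_id itself.
+		 */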
+		if (top < req_id) {
+			if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+				(((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+				del_req_id = req_id;
+		}
+
+		if (!del_req_id)
+			return rc;
+
+		CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
+			top, del_req_id);
+	}
+	fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+	return 0;
+}
+
+static int cam_flash_pmic_delete_req(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	int i = 0;
+	struct cam_flash_frame_setting *flash_data = NULL;
+	uint64_t top = 0, del_req_id = 0;
+
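+	/* Same top/del_req_id scan as the I2C variant, over per_frame[] */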
+	if (req_id != 0) {
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			flash_data = &fctrl->per_frame[i];
+			if (req_id >= flash_data->cmn_attr.request_id &&
+				flash_data->cmn_attr.is_settings_valid
+				== 1) {
+				if (top < flash_data->cmn_attr.request_id) {
+					del_req_id = top;
+					top = flash_data->cmn_attr.request_id;
+				} else if (top >
+					flash_data->cmn_attr.request_id &&
+					del_req_id <
+					flash_data->cmn_attr.request_id) {
+					del_req_id =
+						flash_data->cmn_attr.request_id;
+				}
+			}
+		}
+
+		if (top < req_id) {
+			if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+				(((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+				del_req_id = req_id;
+		}
+
+		if (!del_req_id)
+			return 0;
+
+		CAM_DBG(CAM_FLASH, "top: %llu, del_req_id:%llu",
+			top, del_req_id);
+	}
+
+	fctrl->func_tbl.flush_req(fctrl, FLUSH_REQ, del_req_id);
+	return 0;
+}
+
+static int32_t cam_flash_slaveInfo_pkt_parser(struct cam_flash_ctrl *fctrl,
+	uint32_t *cmd_buf)
+{
+	int32_t rc = 0;
+	struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+
+	if (fctrl->io_master_info.master_type == CCI_MASTER) {
+		fctrl->io_master_info.cci_client->cci_i2c_master =
+			fctrl->cci_i2c_master;
+		fctrl->io_master_info.cci_client->i2c_freq_mode =
+			i2c_info->i2c_freq_mode;
+		fctrl->io_master_info.cci_client->sid =
+			i2c_info->slave_addr >> 1;
+		CAM_DBG(CAM_FLASH, "Slave addr: 0x%x Freq Mode: %d",
+			i2c_info->slave_addr, i2c_info->i2c_freq_mode);
+	} else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+		fctrl->io_master_info.client->addr = i2c_info->slave_addr;
+		CAM_DBG(CAM_FLASH, "Slave addr: 0x%x", i2c_info->slave_addr);
+	} else {
+		CAM_ERR(CAM_FLASH, "Invalid Master type: %d",
+			fctrl->io_master_info.master_type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	struct i2c_settings_list *i2c_list;
+	struct i2c_settings_array *i2c_set = NULL;
+	int frame_offset = 0, rc = 0;
+
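+	/*
+	 * req_id == 0 applies the NonRealTime init and config settings;
+	 * a non-zero req_id applies the per-frame settings stored at
+	 * slot req_id % MAX_PER_FRAME_ARRAY.
+	 */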
+	if (req_id == 0) {
+		/* NonRealTime init settings */
+		if (fctrl->i2c_data.init_settings.is_settings_valid == true) {
+			list_for_each_entry(i2c_list,
+				&(fctrl->i2c_data.init_settings.list_head),
+				list) {
+				rc = cam_sensor_util_i2c_apply_setting
+					(&(fctrl->io_master_info), i2c_list);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed to apply init settings: %d",
+					rc);
+					return rc;
+				}
+			}
+		}
+		/* NonRealTime (Widget/RER/INIT_FIRE settings) */
+		if (fctrl->i2c_data.config_settings.is_settings_valid == true) {
+			list_for_each_entry(i2c_list,
+				&(fctrl->i2c_data.config_settings.list_head),
+				list) {
+				rc = cam_sensor_util_i2c_apply_setting
+					(&(fctrl->io_master_info), i2c_list);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed to apply NRT settings: %d", rc);
+					return rc;
+				}
+			}
+		}
+	} else {
+		/* RealTime */
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+		i2c_set = &fctrl->i2c_data.per_frame[frame_offset];
+		if ((i2c_set->is_settings_valid == true) &&
+			(i2c_set->request_id == req_id)) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = cam_sensor_util_i2c_apply_setting(
+					&(fctrl->io_master_info), i2c_list);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed to apply settings: %d", rc);
+					return rc;
+				}
+			}
+		}
+	}
+
+	cam_flash_i2c_delete_req(fctrl, req_id);
+	return rc;
+}
+
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl,
+	uint64_t req_id)
+{
+	int rc = 0, i = 0;
+	int frame_offset = 0;
+	uint16_t num_iterations;
+	struct cam_flash_frame_setting *flash_data = NULL;
+
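+	/*
+	 * req_id == 0 services the NonRealTime commands staged in
+	 * nrt_info: INIT_FIRE, WIDGET and RER.
+	 */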
+	if (req_id == 0) {
+		if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE) {
+			flash_data = &fctrl->nrt_info;
+			CAM_DBG(CAM_REQ,
+				"FLASH_INIT_FIRE req_id: %u flash_opcode: %d",
+				req_id, flash_data->opcode);
+
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_FIREHIGH) {
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_START) {
+					CAM_WARN(CAM_FLASH,
+					"Wrong state :Prev state: %d",
+					fctrl->flash_state);
+					return -EINVAL;
+				}
+
+				rc = cam_flash_high(fctrl, flash_data);
+				if (rc)
+					CAM_ERR(CAM_FLASH,
+						"FLASH ON failed : %d", rc);
+			}
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_START) {
+					CAM_WARN(CAM_FLASH,
+					"Wrong state :Prev state: %d",
+					fctrl->flash_state);
+					return -EINVAL;
+				}
+
+				rc = cam_flash_low(fctrl, flash_data);
+				if (rc)
+					CAM_ERR(CAM_FLASH,
+						"TORCH ON failed : %d", rc);
+			}
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_OFF) {
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"LED OFF FAILED: %d",
+					rc);
+					return rc;
+				}
+			}
+		} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET) {
+			flash_data = &fctrl->nrt_info;
+			CAM_DBG(CAM_REQ,
+				"FLASH_WIDGET req_id: %u flash_opcode: %d",
+				req_id, flash_data->opcode);
+
+			if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_FIRELOW) {
+				rc = cam_flash_low(fctrl, flash_data);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Torch ON failed : %d",
+						rc);
+					goto nrt_del_req;
+				}
+			} else if (flash_data->opcode ==
+				CAMERA_SENSOR_FLASH_OP_OFF) {
+				rc = cam_flash_off(fctrl);
+				if (rc)
+					CAM_ERR(CAM_FLASH,
+					"LED off failed: %d",
+					rc);
+			}
+		} else if (fctrl->nrt_info.cmn_attr.cmd_type ==
+			CAMERA_SENSOR_FLASH_CMD_TYPE_RER) {
+			flash_data = &fctrl->nrt_info;
+			if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed: %d",
+						rc);
+					goto nrt_del_req;
+				}
+			}
+			CAM_DBG(CAM_REQ, "FLASH_RER req_id: %u", req_id);
+
+			num_iterations = flash_data->num_iterations;
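+			/*
+			 * RER: pulse the torch num_iterations times with
+			 * the requested on/off delays between pulses.
+			 */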
+			for (i = 0; i < num_iterations; i++) {
+				/* Turn On Torch */
+				if (fctrl->flash_state ==
+					CAM_FLASH_STATE_START) {
+					rc = cam_flash_low(fctrl, flash_data);
+					if (rc) {
+						CAM_ERR(CAM_FLASH,
+							"Fire Torch Failed");
+						goto nrt_del_req;
+					}
+
+					usleep_range(
+					flash_data->led_on_delay_ms * 1000,
+					flash_data->led_on_delay_ms * 1000 +
+						100);
+				}
+				/* Turn Off Torch */
+				rc = cam_flash_off(fctrl);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash off failed: %d", rc);
+					continue;
+				}
+				fctrl->flash_state = CAM_FLASH_STATE_START;
+				usleep_range(
+				flash_data->led_off_delay_ms * 1000,
+				flash_data->led_off_delay_ms * 1000 + 100);
+			}
+		}
+	} else {
+		frame_offset = req_id % MAX_PER_FRAME_ARRAY;
+		flash_data = &fctrl->per_frame[frame_offset];
+		CAM_DBG(CAM_REQ, "FLASH_RT req_id: %u flash_opcode: %d",
+			req_id, flash_data->opcode);
+
+		if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_FIREHIGH) &&
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
+			/* Turn On Flash */
+			if (fctrl->flash_state == CAM_FLASH_STATE_START) {
+				rc = cam_flash_high(fctrl, flash_data);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Flash ON failed: rc= %d",
+						rc);
+					goto apply_setting_err;
+				}
+			}
+		} else if ((flash_data->opcode ==
+			CAMERA_SENSOR_FLASH_OP_FIRELOW) &&
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
+			/* Turn On Torch */
+			if (fctrl->flash_state == CAM_FLASH_STATE_START) {
+				rc = cam_flash_low(fctrl, flash_data);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+						"Torch ON failed: rc= %d",
+						rc);
+					goto apply_setting_err;
+				}
+			}
+		} else if ((flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF) &&
+			(flash_data->cmn_attr.is_settings_valid) &&
+			(flash_data->cmn_attr.request_id == req_id)) {
+			rc = cam_flash_off(fctrl);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+					"Flash off failed %d", rc);
+				goto apply_setting_err;
+			}
+		} else if (flash_data->opcode == CAM_PKT_NOP_OPCODE) {
+			CAM_DBG(CAM_FLASH, "NOP Packet");
+		} else {
+			rc = -EINVAL;
+			CAM_ERR(CAM_FLASH, "Invalid opcode: %d req_id: %llu",
+				flash_data->opcode, req_id);
+			goto apply_setting_err;
+		}
+	}
+
+nrt_del_req:
+	cam_flash_pmic_delete_req(fctrl, req_id);
+apply_setting_err:
+	return rc;
+}
+
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+	int rc = 0, i = 0;
+	uintptr_t generic_ptr;
+	uint32_t total_cmd_buf_in_bytes = 0;
+	uint32_t processed_cmd_buf_in_bytes = 0;
+	uint16_t cmd_length_in_bytes = 0;
+	uint32_t *cmd_buf =  NULL;
+	uint32_t *offset = NULL;
+	uint32_t frm_offset = 0;
+	size_t len_of_buffer;
+	struct cam_flash_init *flash_init = NULL;
+	struct common_header  *cmn_hdr = NULL;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct cam_config_dev_cmd config;
+	struct cam_req_mgr_add_request add_req;
+	struct i2c_data_settings *i2c_data = NULL;
+	struct i2c_settings_array *i2c_reg_settings = NULL;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+		return -EINVAL;
+	}
+	/* getting CSL Packet */
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (copy_from_user((&config), (void __user *) ioctl_ctrl->handle,
+		sizeof(config))) {
+		CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+		return -EFAULT;
+	}
+
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		&generic_ptr, &len_of_buffer);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+		return rc;
+	}
+
+	if (config.offset > len_of_buffer) {
+		CAM_ERR(CAM_FLASH,
+			"offset is out of bounds: offset: %lld len: %zu",
+			config.offset, len_of_buffer);
+		return -EINVAL;
+	}
+
+	/* Add offset to the flash csl header */
+	csl_packet = (struct cam_packet *)(generic_ptr + config.offset);
+
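+	/*
+	 * Reject any non-INIT packet whose request id does not exceed
+	 * the last flushed request.
+	 */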
+	if ((csl_packet->header.op_code & 0xFFFFFF) !=
+		CAM_FLASH_PACKET_OPCODE_INIT &&
+		csl_packet->header.request_id <= fctrl->last_flush_req
+		&& fctrl->last_flush_req != 0) {
+		CAM_DBG(CAM_FLASH,
+			"reject request %lld, last request to flush %lld",
+			csl_packet->header.request_id, fctrl->last_flush_req);
+		return -EINVAL;
+	}
+
+	if (csl_packet->header.request_id > fctrl->last_flush_req)
+		fctrl->last_flush_req = 0;
+
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_FLASH_PACKET_OPCODE_INIT: {
+		/* INIT packet */
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+		/* Loop through multiple command buffers */
+		for (i = 1; i < csl_packet->num_cmd_buf; i++) {
+			total_cmd_buf_in_bytes = cmd_desc[i].length;
+			processed_cmd_buf_in_bytes = 0;
+			if (!total_cmd_buf_in_bytes)
+				continue;
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				&generic_ptr, &len_of_buffer);
+			if (rc < 0) {
+				CAM_ERR(CAM_FLASH, "Failed to get cpu buf");
+				return rc;
+			}
+			cmd_buf = (uint32_t *)generic_ptr;
+			if (!cmd_buf) {
+				CAM_ERR(CAM_FLASH, "invalid cmd buf");
+				return -EINVAL;
+			}
+			cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+			cmn_hdr = (struct common_header *)cmd_buf;
+
+			/* Loop through cmd formats in one cmd buffer */
+			CAM_DBG(CAM_FLASH,
+				"command Type: %d,Processed: %d,Total: %d",
+				cmn_hdr->cmd_type, processed_cmd_buf_in_bytes,
+				total_cmd_buf_in_bytes);
+			switch (cmn_hdr->cmd_type) {
+			case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO:
+				flash_init = (struct cam_flash_init *)cmd_buf;
+				fctrl->flash_type = flash_init->flash_type;
+				cmd_length_in_bytes =
+					sizeof(struct cam_flash_init);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				rc = cam_flash_slaveInfo_pkt_parser(
+					fctrl, cmd_buf);
+				if (rc < 0) {
+					CAM_ERR(CAM_FLASH,
+					"Failed parsing slave info: rc: %d",
+					rc);
+					return rc;
+				}
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				CAM_DBG(CAM_FLASH,
+					"Received power settings");
+				cmd_length_in_bytes =
+					total_cmd_buf_in_bytes;
+				rc = cam_sensor_update_power_settings(
+					cmd_buf,
+					total_cmd_buf_in_bytes,
+					&fctrl->power_info);
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"Failed update power settings");
+					return rc;
+				}
+				break;
+			default:
+				CAM_DBG(CAM_FLASH,
+					"Received initSettings");
+				i2c_data = &(fctrl->i2c_data);
+				i2c_reg_settings =
+					&fctrl->i2c_data.init_settings;
+
+				i2c_reg_settings->request_id = 0;
+				i2c_reg_settings->is_settings_valid = 1;
+				rc = cam_sensor_i2c_command_parser(
+					&fctrl->io_master_info,
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_FLASH,
+					"pkt parsing failed: %d", rc);
+					return rc;
+				}
+				cmd_length_in_bytes =
+					cmd_desc[i].length;
+				processed_cmd_buf_in_bytes +=
+					cmd_length_in_bytes;
+				cmd_buf += cmd_length_in_bytes/
+						sizeof(uint32_t);
+
+				break;
+			}
+		}
+		power_info = &fctrl->power_info;
+		if (!power_info) {
+			CAM_ERR(CAM_FLASH, "Power_info is NULL");
+			return -EINVAL;
+		}
+
+		/* Parse and fill vreg params for power up settings */
+		rc = msm_camera_fill_vreg_params(&fctrl->soc_info,
+			power_info->power_setting,
+			power_info->power_setting_size);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"failed to fill vreg params for power up rc:%d",
+				rc);
+			return rc;
+		}
+
+		/* Parse and fill vreg params for power down settings*/
+		rc = msm_camera_fill_vreg_params(
+			&fctrl->soc_info,
+			power_info->power_down_setting,
+			power_info->power_down_setting_size);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"failed to fill vreg params power down rc:%d",
+				rc);
+			return rc;
+		}
+
+		rc = fctrl->func_tbl.power_ops(fctrl, true);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"Enable Regulator Failed rc = %d", rc);
+			return rc;
+		}
+
+		rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "cannot apply settings rc = %d", rc);
+			return rc;
+		}
+
+		fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		frm_offset = csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY;
+		/* Add support for handling i2c_data */
+		i2c_reg_settings =
+			&fctrl->i2c_data.per_frame[frm_offset];
+		if (i2c_reg_settings->is_settings_valid == true) {
+			i2c_reg_settings->request_id = 0;
+			i2c_reg_settings->is_settings_valid = false;
+			goto update_req_mgr;
+		}
+		i2c_reg_settings->is_settings_valid = true;
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&fctrl->io_master_info,
+			i2c_reg_settings, cmd_desc, 1);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+			"Failed in parsing i2c packets");
+			return rc;
+		}
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+
+		/* Add support for handling i2c_data */
+		i2c_reg_settings = &fctrl->i2c_data.config_settings;
+		if (i2c_reg_settings->is_settings_valid == true) {
+			i2c_reg_settings->request_id = 0;
+			i2c_reg_settings->is_settings_valid = false;
+
+			rc = delete_request(i2c_reg_settings);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+				"Failed in Deleting the err: %d", rc);
+				return rc;
+			}
+		}
+		i2c_reg_settings->is_settings_valid = true;
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_sensor_i2c_command_parser(
+			&fctrl->io_master_info,
+			i2c_reg_settings, cmd_desc, 1);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+			"Failed in parsing i2c NRT packets");
+			return rc;
+		}
+		rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+		if (rc)
+			CAM_ERR(CAM_FLASH,
+			"Apply setting failed: %d", rc);
+		return rc;
+	}
+	case CAM_PKT_NOP_OPCODE: {
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+			CAM_WARN(CAM_FLASH,
+				"Rxed NOP packets without linking");
+			frm_offset = csl_packet->header.request_id %
+				MAX_PER_FRAME_ARRAY;
+			fctrl->i2c_data.per_frame[frm_offset].is_settings_valid
+				= false;
+			return 0;
+		}
+
+		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %u",
+			csl_packet->header.request_id);
+		goto update_req_mgr;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+			(csl_packet->header.op_code & 0xFFFFFF));
+		return -EINVAL;
+	}
+update_req_mgr:
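+	/* Only NOP and SET_OPS packets are registered with the CRM */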
+	if (((csl_packet->header.op_code  & 0xFFFFF) ==
+		CAM_PKT_NOP_OPCODE) ||
+		((csl_packet->header.op_code & 0xFFFFF) ==
+		CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+		add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+		add_req.req_id = csl_packet->header.request_id;
+		add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+
+		if ((csl_packet->header.op_code & 0xFFFFF) ==
+			CAM_FLASH_PACKET_OPCODE_SET_OPS)
+			add_req.skip_before_applying = 1;
+		else
+			add_req.skip_before_applying = 0;
+
+		if (fctrl->bridge_intf.crm_cb &&
+			fctrl->bridge_intf.crm_cb->add_req)
+			fctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+	}
+	return rc;
+}
+
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg)
+{
+	int rc = 0, i = 0;
+	uintptr_t generic_ptr;
+	uint32_t *cmd_buf =  NULL;
+	uint32_t *offset = NULL;
+	uint32_t frm_offset = 0;
+	size_t len_of_buffer;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct common_header *cmn_hdr;
+	struct cam_config_dev_cmd config;
+	struct cam_req_mgr_add_request add_req = {0};
+	struct cam_flash_init *cam_flash_info = NULL;
+	struct cam_flash_set_rer *flash_rer_info = NULL;
+	struct cam_flash_set_on_off *flash_operation_info = NULL;
+	struct cam_flash_query_curr *flash_query_info = NULL;
+	struct cam_flash_frame_setting *flash_data = NULL;
+	struct cam_flash_private_soc *soc_private = NULL;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL");
+		return -EINVAL;
+	}
+
+	soc_private = (struct cam_flash_private_soc *)
+		fctrl->soc_info.soc_private;
+
+	/* getting CSL Packet */
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (copy_from_user((&config),
+		u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(config))) {
+		CAM_ERR(CAM_FLASH, "Copy cmd handle from user failed");
+		rc = -EFAULT;
+		return rc;
+	}
+
+	rc = cam_mem_get_cpu_buf(config.packet_handle,
+		&generic_ptr, &len_of_buffer);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Failed in getting the buffer : %d", rc);
+		return rc;
+	}
+
+	if (config.offset > len_of_buffer) {
+		CAM_ERR(CAM_FLASH,
+			"offset is out of bounds: offset: %lld len: %zu",
+			config.offset, len_of_buffer);
+		return -EINVAL;
+	}
+
+	/* Add offset to the flash csl header */
+	csl_packet =
+		(struct cam_packet *)(generic_ptr + (uint32_t)config.offset);
+
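+	/* Reject stale requests, as in the I2C packet parser */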
+	if ((csl_packet->header.op_code & 0xFFFFFF) !=
+		CAM_FLASH_PACKET_OPCODE_INIT &&
+		csl_packet->header.request_id <= fctrl->last_flush_req
+		&& fctrl->last_flush_req != 0) {
+		CAM_DBG(CAM_FLASH,
+			"reject request %lld, last request to flush %lld",
+			csl_packet->header.request_id, fctrl->last_flush_req);
+		return -EINVAL;
+	}
+
+	if (csl_packet->header.request_id > fctrl->last_flush_req)
+		fctrl->last_flush_req = 0;
+
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_FLASH_PACKET_OPCODE_INIT: {
+		/* INIT packet */
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			&generic_ptr, &len_of_buffer);
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cam_flash_info = (struct cam_flash_init *)cmd_buf;
+
+		switch (cam_flash_info->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO: {
+			CAM_DBG(CAM_FLASH, "INIT_INFO CMD CALLED");
+			fctrl->flash_init_setting.cmn_attr.request_id = 0;
+			fctrl->flash_init_setting.cmn_attr.is_settings_valid =
+				true;
+			fctrl->flash_type = cam_flash_info->flash_type;
+			fctrl->is_regulator_enabled = false;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO;
+
+			rc = fctrl->func_tbl.power_ops(fctrl, true);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+					"Enable Regulator Failed rc = %d", rc);
+				return rc;
+			}
+
+			fctrl->flash_state =
+				CAM_FLASH_STATE_CONFIG;
+			break;
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE: {
+			CAM_DBG(CAM_FLASH, "INIT_FIRE Operation");
+
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			fctrl->nrt_info.cmn_attr.count =
+				flash_operation_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.opcode =
+				flash_operation_info->opcode;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE;
+			for (i = 0;
+				i < flash_operation_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+				flash_operation_info->led_current_ma[i];
+
+			rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH,
+					"Apply setting failed: %d",
+					rc);
+
+			fctrl->flash_state = CAM_FLASH_STATE_CONFIG;
+			break;
+		}
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+				cam_flash_info->cmd_type);
+			return -EINVAL;
+		}
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		frm_offset = csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY;
+		flash_data = &fctrl->per_frame[frm_offset];
+
+		if (flash_data->cmn_attr.is_settings_valid == true) {
+			flash_data->cmn_attr.request_id = 0;
+			flash_data->cmn_attr.is_settings_valid = false;
+			for (i = 0; i < flash_data->cmn_attr.count; i++)
+				flash_data->led_current_ma[i] = 0;
+		}
+
+		flash_data->cmn_attr.request_id = csl_packet->header.request_id;
+		flash_data->cmn_attr.is_settings_valid = true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			&generic_ptr, &len_of_buffer);
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+
+		if (!cmd_buf)
+			return -EINVAL;
+
+		cmn_hdr = (struct common_header *)cmd_buf;
+
+		switch (cmn_hdr->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE: {
+			CAM_DBG(CAM_FLASH,
+				"CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE cmd called");
+			if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+				(fctrl->flash_state ==
+					CAM_FLASH_STATE_ACQUIRE)) {
+				CAM_WARN(CAM_FLASH,
+					"Rxed Flash fire ops without linking");
+				flash_data->cmn_attr.is_settings_valid = false;
+				return 0;
+			}
+
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			if (!flash_operation_info) {
+				CAM_ERR(CAM_FLASH,
+					"flash_operation_info Null");
+				return -EINVAL;
+			}
+
+			flash_data->opcode = flash_operation_info->opcode;
+			flash_data->cmn_attr.count =
+				flash_operation_info->count;
+			for (i = 0; i < flash_operation_info->count; i++)
+				flash_data->led_current_ma[i]
+				= flash_operation_info->led_current_ma[i];
+
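+			/*
+			 * Flag OFF requests so the CRM delays applying
+			 * them by one frame (SKIP_NEXT_FRAME).
+			 */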
+			if (flash_data->opcode == CAMERA_SENSOR_FLASH_OP_OFF)
+				add_req.skip_before_applying |= SKIP_NEXT_FRAME;
+		}
+		break;
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type = %d",
+				cmn_hdr->cmd_type);
+			return -EINVAL;
+		}
+		break;
+	}
+	case CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS: {
+		offset = (uint32_t *)((uint8_t *)&csl_packet->payload +
+			csl_packet->cmd_buf_offset);
+		fctrl->nrt_info.cmn_attr.is_settings_valid = true;
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		rc = cam_mem_get_cpu_buf(cmd_desc->mem_handle,
+			&generic_ptr, &len_of_buffer);
+		cmd_buf = (uint32_t *)((uint8_t *)generic_ptr +
+			cmd_desc->offset);
+		cmn_hdr = (struct common_header *)cmd_buf;
+
+		switch (cmn_hdr->cmd_type) {
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET: {
+			CAM_DBG(CAM_FLASH, "Widget Flash Operation");
+			flash_operation_info =
+				(struct cam_flash_set_on_off *) cmd_buf;
+			fctrl->nrt_info.cmn_attr.count =
+				flash_operation_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.opcode =
+				flash_operation_info->opcode;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET;
+
+			for (i = 0; i < flash_operation_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+					flash_operation_info->led_current_ma[i];
+
+			rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH, "Apply setting failed: %d",
+					rc);
+			return rc;
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR: {
+			int query_curr_ma = 0;
+
+			flash_query_info =
+				(struct cam_flash_query_curr *)cmd_buf;
+
+			if (soc_private->is_wled_flash)
+				rc = wled_flash_led_prepare(
+					fctrl->switch_trigger,
+					QUERY_MAX_AVAIL_CURRENT,
+					&query_curr_ma);
+			else
+				rc = qpnp_flash_led_prepare(
+					fctrl->switch_trigger,
+					QUERY_MAX_AVAIL_CURRENT,
+					&query_curr_ma);
+
+			CAM_DBG(CAM_FLASH, "query_curr_ma = %d",
+				query_curr_ma);
+			if (rc) {
+				CAM_ERR(CAM_FLASH,
+				"Query current failed with rc=%d", rc);
+				return rc;
+			}
+			flash_query_info->query_current_ma = query_curr_ma;
+			break;
+		}
+		case CAMERA_SENSOR_FLASH_CMD_TYPE_RER: {
+			rc = 0;
+			flash_rer_info = (struct cam_flash_set_rer *)cmd_buf;
+			fctrl->nrt_info.cmn_attr.cmd_type =
+				CAMERA_SENSOR_FLASH_CMD_TYPE_RER;
+			fctrl->nrt_info.opcode = flash_rer_info->opcode;
+			fctrl->nrt_info.cmn_attr.count = flash_rer_info->count;
+			fctrl->nrt_info.cmn_attr.request_id = 0;
+			fctrl->nrt_info.num_iterations =
+				flash_rer_info->num_iteration;
+			fctrl->nrt_info.led_on_delay_ms =
+				flash_rer_info->led_on_delay_ms;
+			fctrl->nrt_info.led_off_delay_ms =
+				flash_rer_info->led_off_delay_ms;
+
+			for (i = 0; i < flash_rer_info->count; i++)
+				fctrl->nrt_info.led_current_ma[i] =
+					flash_rer_info->led_current_ma[i];
+
+
+			rc = fctrl->func_tbl.apply_setting(fctrl, 0);
+			if (rc)
+				CAM_ERR(CAM_FLASH, "apply_setting failed: %d",
+					rc);
+			return rc;
+		}
+		default:
+			CAM_ERR(CAM_FLASH, "Wrong cmd_type : %d",
+				cmn_hdr->cmd_type);
+			return -EINVAL;
+		}
+		break;
+	}
+	case CAM_PKT_NOP_OPCODE: {
+		frm_offset = csl_packet->header.request_id %
+			MAX_PER_FRAME_ARRAY;
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE)) {
+			CAM_WARN(CAM_FLASH,
+				"Rxed NOP packets without linking");
+			fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid
+				= false;
+			return 0;
+		}
+
+		fctrl->per_frame[frm_offset].cmn_attr.is_settings_valid = false;
+		fctrl->per_frame[frm_offset].cmn_attr.request_id = 0;
+		fctrl->per_frame[frm_offset].opcode = CAM_PKT_NOP_OPCODE;
+		CAM_DBG(CAM_FLASH, "NOP Packet is Received: req_id: %llu",
+			csl_packet->header.request_id);
+		goto update_req_mgr;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Wrong Opcode : %d",
+			(csl_packet->header.op_code & 0xFFFFFF));
+		return -EINVAL;
+	}
+update_req_mgr:
+	if (((csl_packet->header.op_code  & 0xFFFFF) ==
+		CAM_PKT_NOP_OPCODE) ||
+		((csl_packet->header.op_code & 0xFFFFF) ==
+		CAM_FLASH_PACKET_OPCODE_SET_OPS)) {
+		add_req.link_hdl = fctrl->bridge_intf.link_hdl;
+		add_req.req_id = csl_packet->header.request_id;
+		add_req.dev_hdl = fctrl->bridge_intf.device_hdl;
+
+		if ((csl_packet->header.op_code & 0xFFFFF) ==
+			CAM_FLASH_PACKET_OPCODE_SET_OPS)
+			add_req.skip_before_applying |= 1;
+		else
+			add_req.skip_before_applying = 0;
+
+		if (fctrl->bridge_intf.crm_cb &&
+			fctrl->bridge_intf.crm_cb->add_req)
+			fctrl->bridge_intf.crm_cb->add_req(&add_req);
+		CAM_DBG(CAM_FLASH, "add req to req_mgr= %lld", add_req.req_id);
+	}
+
+	return rc;
+}
+
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	info->dev_id = CAM_REQ_MGR_DEVICE_FLASH;
+	strlcpy(info->name, CAM_FLASH_NAME, sizeof(info->name));
+	info->p_delay = CAM_FLASH_PIPELINE_DELAY;
+	info->trigger = CAM_TRIGGER_POINT_SOF;
+	return 0;
+}
+
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_flash_ctrl *fctrl = NULL;
+
+	if (!link)
+		return -EINVAL;
+
+	fctrl = (struct cam_flash_ctrl *)cam_get_device_priv(link->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, " Device data is NULL");
+		return -EINVAL;
+	}
+	mutex_lock(&fctrl->flash_mutex);
+	if (link->link_enable) {
+		fctrl->bridge_intf.link_hdl = link->link_hdl;
+		fctrl->bridge_intf.crm_cb = link->crm_cb;
+	} else {
+		fctrl->bridge_intf.link_hdl = -1;
+		fctrl->bridge_intf.crm_cb = NULL;
+	}
+	mutex_unlock(&fctrl->flash_mutex);
+
+	return 0;
+}
+
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl)
+{
+	int rc = 0;
+
+	if (fctrl->bridge_intf.device_hdl != 1) {
+		rc = cam_destroy_device_hdl(fctrl->bridge_intf.device_hdl);
+		if (rc)
+			CAM_ERR(CAM_FLASH,
+				"Failed in destroying device handle rc = %d",
+				rc);
+		fctrl->bridge_intf.device_hdl = -1;
+		fctrl->bridge_intf.link_hdl = -1;
+		fctrl->bridge_intf.session_hdl = -1;
+		fctrl->last_flush_req = 0;
+	}
+
+	return rc;
+}
+
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl)
+{
+	int rc;
+
+	if (fctrl->flash_state == CAM_FLASH_STATE_INIT)
+		return;
+
+	if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
+		(fctrl->flash_state == CAM_FLASH_STATE_START)) {
+		fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+		rc = fctrl->func_tbl.power_ops(fctrl, false);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "Power Down Failed rc: %d",
+				rc);
+	}
+
+	rc = cam_flash_release_dev(fctrl);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Release failed rc: %d", rc);
+
+	fctrl->flash_state = CAM_FLASH_STATE_INIT;
+}
+
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	fctrl = (struct cam_flash_ctrl *) cam_get_device_priv(apply->dev_hdl);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&fctrl->flash_mutex);
+	rc = fctrl->func_tbl.apply_setting(fctrl, apply->request_id);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "apply_setting failed with rc=%d",
+			rc);
+	mutex_unlock(&fctrl->flash_mutex);
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
new file mode 100644
index 0000000..def6779
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FLASH_CORE_H_
+#define _CAM_FLASH_CORE_H_
+
+#include <media/cam_sensor.h>
+#include "cam_flash_dev.h"
+
+int cam_flash_publish_dev_info(struct cam_req_mgr_device_info *info);
+int cam_flash_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+int cam_flash_apply_request(struct cam_req_mgr_apply_request *apply);
+int cam_flash_process_evt(struct cam_req_mgr_link_evt_data *event_data);
+int cam_flash_flush_request(struct cam_req_mgr_flush_request *flush);
+
+
+#endif /*_CAM_FLASH_CORE_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
new file mode 100644
index 0000000..c2d713d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include "cam_flash_dev.h"
+#include "cam_flash_soc.h"
+#include "cam_flash_core.h"
+#include "cam_common_util.h"
+
+static int32_t cam_flash_driver_cmd(struct cam_flash_ctrl *fctrl,
+		void *arg, struct cam_flash_private_soc *soc_private)
+{
+	int rc = 0;
+	int i = 0;
+	struct cam_control *cmd = (struct cam_control *)arg;
+
+	if (!fctrl || !arg) {
+		CAM_ERR(CAM_FLASH, "fctrl/arg is NULL with arg:%pK fctrl%pK",
+			fctrl, arg);
+		return -EINVAL;
+	}
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_FLASH, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	mutex_lock(&(fctrl->flash_mutex));
+	switch (cmd->op_code) {
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev flash_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		CAM_DBG(CAM_FLASH, "CAM_ACQUIRE_DEV");
+
+		if (fctrl->flash_state != CAM_FLASH_STATE_INIT) {
+			CAM_ERR(CAM_FLASH,
+				"Cannot apply Acquire dev: Prev state: %d",
+				fctrl->flash_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (fctrl->bridge_intf.device_hdl != -1) {
+			CAM_ERR(CAM_FLASH, "Device is already acquired");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		rc = copy_from_user(&flash_acq_dev,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(flash_acq_dev));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Copying from User");
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = flash_acq_dev.session_handle;
+		bridge_params.ops = &fctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = fctrl;
+
+		flash_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		fctrl->bridge_intf.device_hdl =
+			flash_acq_dev.device_handle;
+		fctrl->bridge_intf.session_hdl =
+			flash_acq_dev.session_handle;
+
+		rc = copy_to_user(u64_to_user_ptr(cmd->handle),
+			&flash_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev));
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Copy to User with rc = %d",
+				rc);
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
+		break;
+	}
+	case CAM_RELEASE_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_RELEASE_DEV");
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_START)) {
+			CAM_WARN(CAM_FLASH,
+				"Wrong state for Release dev: Prev state:%d",
+				fctrl->flash_state);
+		}
+
+		if (fctrl->bridge_intf.device_hdl == -1 &&
+			fctrl->flash_state == CAM_FLASH_STATE_ACQUIRE) {
+			CAM_ERR(CAM_FLASH,
+				"Invalid Handle: Link Hdl: %d device hdl: %d",
+				fctrl->bridge_intf.device_hdl,
+				fctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (fctrl->bridge_intf.link_hdl != -1) {
+			CAM_ERR(CAM_FLASH,
+				"Device [state: %d] still active on link 0x%x",
+				fctrl->flash_state,
+				fctrl->bridge_intf.link_hdl);
+			rc = -EAGAIN;
+			goto release_mutex;
+		}
+
+		if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_START))
+			fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+
+		if (cam_flash_release_dev(fctrl))
+			CAM_WARN(CAM_FLASH,
+				"Failed in destroying the device Handle");
+
+		if (fctrl->func_tbl.power_ops(fctrl, false))
+			CAM_WARN(CAM_FLASH, "Power Down Failed");
+
+		fctrl->flash_state = CAM_FLASH_STATE_INIT;
+		break;
+	}
+	case CAM_QUERY_CAP: {
+		struct cam_flash_query_cap_info flash_cap = {0};
+
+		CAM_DBG(CAM_FLASH, "CAM_QUERY_CAP");
+		flash_cap.slot_info = fctrl->soc_info.index;
+		for (i = 0; i < fctrl->flash_num_sources; i++) {
+			flash_cap.max_current_flash[i] =
+				soc_private->flash_max_current[i];
+			flash_cap.max_duration_flash[i] =
+				soc_private->flash_max_duration[i];
+		}
+
+		for (i = 0; i < fctrl->torch_num_sources; i++)
+			flash_cap.max_current_torch[i] =
+				soc_private->torch_max_current[i];
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&flash_cap, sizeof(struct cam_flash_query_cap_info))) {
+			CAM_ERR(CAM_FLASH, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_START_DEV");
+		if ((fctrl->flash_state == CAM_FLASH_STATE_INIT) ||
+			(fctrl->flash_state == CAM_FLASH_STATE_START)) {
+			CAM_WARN(CAM_FLASH,
+				"Cannot apply Start Dev: Prev state: %d",
+				fctrl->flash_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		fctrl->flash_state = CAM_FLASH_STATE_START;
+		break;
+	}
+	case CAM_STOP_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_STOP_DEV ENTER");
+		if (fctrl->flash_state != CAM_FLASH_STATE_START) {
+			CAM_WARN(CAM_FLASH,
+				"Cannot apply Stop dev: Prev state is: %d",
+				fctrl->flash_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
+		fctrl->last_flush_req = 0;
+		fctrl->flash_state = CAM_FLASH_STATE_ACQUIRE;
+		break;
+	}
+	case CAM_CONFIG_DEV: {
+		CAM_DBG(CAM_FLASH, "CAM_CONFIG_DEV");
+		rc = fctrl->func_tbl.parser(fctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_FLASH, "Failed Flash Config: rc=%d\n", rc);
+			goto release_mutex;
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Invalid Opcode: %d", cmd->op_code);
+		rc = -EINVAL;
+	}
+
+release_mutex:
+	mutex_unlock(&(fctrl->flash_mutex));
+	return rc;
+}
+
+static int32_t cam_flash_init_default_params(struct cam_flash_ctrl *fctrl)
+{
+	/* Validate input parameters */
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "failed: invalid params fctrl %pK",
+			fctrl);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_FLASH,
+		"master_type: %d", fctrl->io_master_info.master_type);
+	/* Initialize cci_client */
+	if (fctrl->io_master_info.master_type == CCI_MASTER) {
+		fctrl->io_master_info.cci_client = kzalloc(sizeof(
+			struct cam_sensor_cci_client), GFP_KERNEL);
+		if (!(fctrl->io_master_info.cci_client))
+			return -ENOMEM;
+	} else if (fctrl->io_master_info.master_type == I2C_MASTER) {
+		if (!(fctrl->io_master_info.client))
+			return -EINVAL;
+	} else {
+		CAM_ERR(CAM_FLASH,
+			"Invalid master / Master type Not supported");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id cam_flash_dt_match[] = {
+	{.compatible = "qcom,camera-flash", .data = NULL},
+	{}
+};
+
+static long cam_flash_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+	struct cam_flash_private_soc *soc_private = NULL;
+
+	CAM_DBG(CAM_FLASH, "Enter");
+
+	fctrl = v4l2_get_subdevdata(sd);
+	soc_private = fctrl->soc_info.soc_private;
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL: {
+		rc = cam_flash_driver_cmd(fctrl, arg,
+			soc_private);
+		break;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Invalid ioctl cmd type");
+		rc = -EINVAL;
+		break;
+	}
+
+	CAM_DBG(CAM_FLASH, "Exit");
+	return rc;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_flash_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_FLASH,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL: {
+		rc = cam_flash_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc)
+			CAM_ERR(CAM_FLASH, "cam_flash_ioctl failed");
+		break;
+	}
+	default:
+		CAM_ERR(CAM_FLASH, "Invalid compat ioctl cmd_type:%d",
+			cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_FLASH,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+static int cam_flash_platform_remove(struct platform_device *pdev)
+{
+	struct cam_flash_ctrl *fctrl;
+
+	fctrl = platform_get_drvdata(pdev);
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Flash device is NULL");
+		return 0;
+	}
+
+	devm_kfree(&pdev->dev, fctrl);
+
+	return 0;
+}
+
+static int32_t cam_flash_i2c_driver_remove(struct i2c_client *client)
+{
+	int32_t rc = 0;
+	struct cam_flash_ctrl *fctrl = i2c_get_clientdata(client);
+	/* Handle I2C Devices */
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Flash device is NULL");
+		return -EINVAL;
+	}
+	/* Free allocated memory */
+	kfree(fctrl->i2c_data.per_frame);
+	fctrl->i2c_data.per_frame = NULL;
+	kfree(fctrl);
+	return rc;
+}
+
+static int cam_flash_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_flash_ctrl *fctrl =
+		v4l2_get_subdevdata(sd);
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "Flash ctrl ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&fctrl->flash_mutex);
+	cam_flash_shutdown(fctrl);
+	mutex_unlock(&fctrl->flash_mutex);
+
+	return 0;
+}
+
+static struct v4l2_subdev_core_ops cam_flash_subdev_core_ops = {
+	.ioctl = cam_flash_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_flash_subdev_do_ioctl
+#endif
+};
+
+static struct v4l2_subdev_ops cam_flash_subdev_ops = {
+	.core = &cam_flash_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_flash_internal_ops = {
+	.close = cam_flash_subdev_close,
+};
+
+static int cam_flash_init_subdev(struct cam_flash_ctrl *fctrl)
+{
+	int rc = 0;
+
+	fctrl->v4l2_dev_str.internal_ops =
+		&cam_flash_internal_ops;
+	fctrl->v4l2_dev_str.ops = &cam_flash_subdev_ops;
+	fctrl->v4l2_dev_str.name = CAMX_FLASH_DEV_NAME;
+	fctrl->v4l2_dev_str.sd_flags =
+		V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
+	fctrl->v4l2_dev_str.ent_function = CAM_FLASH_DEVICE_TYPE;
+	fctrl->v4l2_dev_str.token = fctrl;
+
+	rc = cam_register_subdev(&(fctrl->v4l2_dev_str));
+	if (rc)
+		CAM_ERR(CAM_FLASH, "Fail to create subdev with %d", rc);
+
+	return rc;
+}
+
+static int32_t cam_flash_platform_probe(struct platform_device *pdev)
+{
+	int32_t rc = 0, i = 0;
+	struct cam_flash_ctrl *fctrl = NULL;
+
+	CAM_DBG(CAM_FLASH, "Enter");
+	if (!pdev->dev.of_node) {
+		CAM_ERR(CAM_FLASH, "of_node NULL");
+		return -EINVAL;
+	}
+
+	fctrl = kzalloc(sizeof(struct cam_flash_ctrl), GFP_KERNEL);
+	if (!fctrl)
+		return -ENOMEM;
+
+	fctrl->pdev = pdev;
+	fctrl->soc_info.pdev = pdev;
+	fctrl->soc_info.dev = &pdev->dev;
+	fctrl->soc_info.dev_name = pdev->name;
+
+	platform_set_drvdata(pdev, fctrl);
+
+	rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "cam_flash_get_dt_data failed with %d", rc);
+		kfree(fctrl);
+		return -EINVAL;
+	}
+
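+	/*
+	 * A "cci-master" DT property selects the I2C/CCI flash path;
+	 * without it the flash is driven through the PMIC ops.
+	 */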
+	if (of_find_property(pdev->dev.of_node, "cci-master", NULL)) {
+		/* Get CCI master */
+		rc = of_property_read_u32(pdev->dev.of_node, "cci-master",
+			&fctrl->cci_i2c_master);
+		CAM_DBG(CAM_FLASH, "cci-master %d, rc %d",
+			fctrl->cci_i2c_master, rc);
+		if (rc < 0) {
+			/* Set default master 0 */
+			fctrl->cci_i2c_master = MASTER_0;
+			rc = 0;
+		}
+
+		fctrl->io_master_info.master_type = CCI_MASTER;
+		rc = cam_flash_init_default_params(fctrl);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"failed: cam_flash_init_default_params rc %d",
+				rc);
+			return rc;
+		}
+
+		fctrl->i2c_data.per_frame =
+			kcalloc(MAX_PER_FRAME_ARRAY,
+			sizeof(struct i2c_settings_array), GFP_KERNEL);
+		if (fctrl->i2c_data.per_frame == NULL) {
+			CAM_ERR(CAM_FLASH, "No Memory");
+			rc = -ENOMEM;
+			goto free_cci_resource;
+		}
+
+		INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+		INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+			INIT_LIST_HEAD(
+				&(fctrl->i2c_data.per_frame[i].list_head));
+
+		fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+		fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+		fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+		fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+	} else {
+		/* PMIC Flash */
+		fctrl->func_tbl.parser = cam_flash_pmic_pkt_parser;
+		fctrl->func_tbl.apply_setting = cam_flash_pmic_apply_setting;
+		fctrl->func_tbl.power_ops = cam_flash_pmic_power_ops;
+		fctrl->func_tbl.flush_req = cam_flash_pmic_flush_request;
+	}
+
+	rc = cam_flash_init_subdev(fctrl);
+	if (rc) {
+		if (fctrl->io_master_info.cci_client != NULL)
+			goto free_cci_resource;
+		else
+			goto free_resource;
+	}
+
+	fctrl->bridge_intf.device_hdl = -1;
+	fctrl->bridge_intf.link_hdl = -1;
+	fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+	fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+	fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+	fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+	fctrl->last_flush_req = 0;
+
+	mutex_init(&(fctrl->flash_mutex));
+
+	fctrl->flash_state = CAM_FLASH_STATE_INIT;
+	CAM_DBG(CAM_FLASH, "Probe success");
+	return rc;
+
+free_cci_resource:
+	kfree(fctrl->io_master_info.cci_client);
+	fctrl->io_master_info.cci_client = NULL;
+free_resource:
+	kfree(fctrl->i2c_data.per_frame);
+	kfree(fctrl->soc_info.soc_private);
+	cam_soc_util_release_platform_resource(&fctrl->soc_info);
+	fctrl->i2c_data.per_frame = NULL;
+	fctrl->soc_info.soc_private = NULL;
+	kfree(fctrl);
+	fctrl = NULL;
+	return rc;
+}
+
+static int32_t cam_flash_i2c_driver_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int32_t rc = 0, i = 0;
+	struct cam_flash_ctrl *fctrl;
+
+	if (client == NULL || id == NULL) {
+		CAM_ERR(CAM_FLASH, "Invalid Args client: %pK id: %pK",
+			client, id);
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_FLASH, "%s :: i2c_check_functionality failed",
+			 client->name);
+		return -EFAULT;
+	}
+
+	/* Create sensor control structure */
+	fctrl = kzalloc(sizeof(*fctrl), GFP_KERNEL);
+	if (!fctrl)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, fctrl);
+
+	fctrl->io_master_info.client = client;
+	fctrl->soc_info.dev = &client->dev;
+	fctrl->soc_info.dev_name = client->name;
+	fctrl->io_master_info.master_type = I2C_MASTER;
+
+	rc = cam_flash_get_dt_data(fctrl, &fctrl->soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "failed: cam_sensor_parse_dt rc %d", rc);
+		goto free_ctrl;
+	}
+
+	rc = cam_flash_init_subdev(fctrl);
+	if (rc)
+		goto free_ctrl;
+
+	fctrl->i2c_data.per_frame =
+		kcalloc(MAX_PER_FRAME_ARRAY,
+		sizeof(struct i2c_settings_array), GFP_KERNEL);
+	if (fctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto unreg_subdev;
+	}
+
+	INIT_LIST_HEAD(&(fctrl->i2c_data.init_settings.list_head));
+	INIT_LIST_HEAD(&(fctrl->i2c_data.config_settings.list_head));
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(fctrl->i2c_data.per_frame[i].list_head));
+
+	fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
+	fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
+	fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
+	fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
+
+	fctrl->bridge_intf.device_hdl = -1;
+	fctrl->bridge_intf.link_hdl = -1;
+	fctrl->bridge_intf.ops.get_dev_info = cam_flash_publish_dev_info;
+	fctrl->bridge_intf.ops.link_setup = cam_flash_establish_link;
+	fctrl->bridge_intf.ops.apply_req = cam_flash_apply_request;
+	fctrl->bridge_intf.ops.flush_req = cam_flash_flush_request;
+	fctrl->last_flush_req = 0;
+
+	mutex_init(&(fctrl->flash_mutex));
+	fctrl->flash_state = CAM_FLASH_STATE_INIT;
+
+	return rc;
+
+unreg_subdev:
+	cam_unregister_subdev(&(fctrl->v4l2_dev_str));
+free_ctrl:
+	kfree(fctrl);
+	fctrl = NULL;
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_flash_dt_match);
+
+static struct platform_driver cam_flash_platform_driver = {
+	.probe = cam_flash_platform_probe,
+	.remove = cam_flash_platform_remove,
+	.driver = {
+		.name = "CAM-FLASH-DRIVER",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_flash_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static const struct i2c_device_id i2c_id[] = {
+	{FLASH_DRIVER_I2C, (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_flash_i2c_driver = {
+	.id_table = i2c_id,
+	.probe  = cam_flash_i2c_driver_probe,
+	.remove = cam_flash_i2c_driver_remove,
+	.driver = {
+		.name = FLASH_DRIVER_I2C,
+	},
+};
+
+static int32_t __init cam_flash_init_module(void)
+{
+	int32_t rc = 0;
+
+	rc = platform_driver_register(&cam_flash_platform_driver);
+	if (rc == 0) {
+		CAM_DBG(CAM_FLASH, "platform probe success");
+		return 0;
+	}
+
+	rc = i2c_add_driver(&cam_flash_i2c_driver);
+	if (rc)
+		CAM_ERR(CAM_FLASH, "i2c_add_driver failed rc: %d", rc);
+	return rc;
+}
+
+static void __exit cam_flash_exit_module(void)
+{
+	platform_driver_unregister(&cam_flash_platform_driver);
+	i2c_del_driver(&cam_flash_i2c_driver);
+}
+
+module_init(cam_flash_init_module);
+module_exit(cam_flash_exit_module);
+MODULE_DESCRIPTION("CAM FLASH");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
new file mode 100644
index 0000000..3ea8626c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_dev.h
@@ -0,0 +1,223 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FLASH_DEV_H_
+#define _CAM_FLASH_DEV_H_
+
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/leds-qpnp-flash.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/cam_sensor.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_util.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_subdev.h"
+#include "cam_mem_mgr.h"
+#include "cam_sensor_cmn_header.h"
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+#include "cam_sensor_io.h"
+#include "cam_flash_core.h"
+
+#define CAMX_FLASH_DEV_NAME "cam-flash-dev"
+
+#define CAM_FLASH_PIPELINE_DELAY 1
+
+#define FLASH_DRIVER_I2C "i2c_flash"
+
+#define CAM_FLASH_PACKET_OPCODE_INIT                 0
+#define CAM_FLASH_PACKET_OPCODE_SET_OPS              1
+#define CAM_FLASH_PACKET_OPCODE_NON_REALTIME_SET_OPS 2
+
+struct cam_flash_ctrl;
+
+enum cam_flash_switch_trigger_ops {
+	LED_SWITCH_OFF = 0,
+	LED_SWITCH_ON,
+};
+
+enum cam_flash_state {
+	CAM_FLASH_STATE_INIT,
+	CAM_FLASH_STATE_ACQUIRE,
+	CAM_FLASH_STATE_CONFIG,
+	CAM_FLASH_STATE_START,
+};
+
+enum cam_flash_flush_type {
+	FLUSH_ALL = 0,
+	FLUSH_REQ,
+	FLUSH_MAX,
+};
+
+/**
+ * struct cam_flash_intf_params
+ * @device_hdl   : Device Handle
+ * @session_hdl  : Session Handle
+ * @link_hdl     : Link Handle
+ * @ops          : KMD operations
+ * @crm_cb       : Callback API pointers
+ */
+struct cam_flash_intf_params {
+	int32_t                     device_hdl;
+	int32_t                     session_hdl;
+	int32_t                     link_hdl;
+	struct cam_req_mgr_kmd_ops  ops;
+	struct cam_req_mgr_crm_cb  *crm_cb;
+};
+
+/**
+ * struct cam_flash_common_attr
+ * @is_settings_valid  : Notify the valid settings
+ * @request_id         : Request id provided by umd
+ * @count              : Number of LEDs
+ * @cmd_type           : Command buffer type
+ */
+struct cam_flash_common_attr {
+	bool      is_settings_valid;
+	uint64_t  request_id;
+	uint16_t  count;
+	uint8_t   cmd_type;
+};
+
+/**
+ * struct cam_flash_init_packet
+ * @cmn_attr   : Provides common attributes
+ * @flash_type : Flash type (PMIC/I2C/GPIO)
+ */
+struct cam_flash_init_packet {
+	struct cam_flash_common_attr  cmn_attr;
+	uint8_t                       flash_type;
+};
+
+/**
+ * struct cam_flash_frame_setting
+ * @cmn_attr         : Provides common attributes
+ * @num_iterations   : Iterations used to perform RER
+ * @led_on_delay_ms  : LED on time in milliseconds
+ * @led_off_delay_ms : LED off time in milliseconds
+ * @opcode           : Command buffer opcode
+ * @led_current_ma[] : LED current array in milliamps
+ */
+struct cam_flash_frame_setting {
+	struct cam_flash_common_attr cmn_attr;
+	uint16_t                     num_iterations;
+	uint16_t                     led_on_delay_ms;
+	uint16_t                     led_off_delay_ms;
+	int8_t                       opcode;
+	uint32_t                     led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
+};
+
+/**
+ * struct cam_flash_private_soc
+ * @switch_trigger_name : Switch trigger name
+ * @flash_trigger_name  : Flash trigger name array
+ * @flash_op_current    : Flash operational current
+ * @flash_max_current   : Max supported current for LED in flash mode
+ * @flash_max_duration  : Max turn on duration for LED in Flash mode
+ * @torch_trigger_name  : Torch trigger name array
+ * @torch_op_current    : Torch operational current
+ * @torch_max_current   : Max supported current for LED in torch mode
+ * @is_wled_flash       : Detection between WLED/LED flash
+ */
+struct cam_flash_private_soc {
+	const char   *switch_trigger_name;
+	const char   *flash_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     flash_max_duration[CAM_FLASH_MAX_LED_TRIGGERS];
+	const char   *torch_trigger_name[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     torch_op_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t     torch_max_current[CAM_FLASH_MAX_LED_TRIGGERS];
+	bool         is_wled_flash;
+};
+
+struct cam_flash_func_tbl {
+	int (*parser)(struct cam_flash_ctrl *fctrl, void *arg);
+	int (*apply_setting)(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+	int (*power_ops)(struct cam_flash_ctrl *fctrl, bool regulator_enable);
+	int (*flush_req)(struct cam_flash_ctrl *fctrl,
+		enum cam_flash_flush_type type, uint64_t req_id);
+};
+
+/**
+ *  struct cam_flash_ctrl
+ * @soc_info            : Soc related information
+ * @pdev                : Platform device
+ * @power_info          : Power settings information
+ * @per_frame[]         : Per_frame setting array
+ * @nrt_info            : NonRealTime settings
+ * @of_node             : Of Node ptr
+ * @v4l2_dev_str        : V4L2 device structure
+ * @bridge_intf         : CRM interface
+ * @flash_init_setting  : Init command buffer structure
+ * @switch_trigger      : Switch trigger ptr
+ * @flash_num_sources   : Number of flash sources
+ * @torch_num_sources   : Number of torch sources
+ * @flash_mutex         : Mutex for flash operations
+ * @flash_state         : Current flash state (LOW/OFF/ON/INIT)
+ * @flash_type          : Flash types (PMIC/I2C/GPIO)
+ * @is_regulator_enabled : Regulator enable state
+ * @func_tbl            : Function table for different HW
+ *	                      (e.g. i2c/pmic/gpio)
+ * @flash_trigger       : Flash trigger ptr
+ * @torch_trigger       : Torch trigger ptr
+ * @cci_i2c_master      : CCI master index
+ * @io_master_info      : Information about the communication master
+ * @i2c_data            : I2C register settings
+ * @last_flush_req      : Last request to flush
+ */
+struct cam_flash_ctrl {
+	struct cam_hw_soc_info              soc_info;
+	struct platform_device             *pdev;
+	struct cam_sensor_power_ctrl_t      power_info;
+	struct cam_flash_frame_setting      per_frame[MAX_PER_FRAME_ARRAY];
+	struct cam_flash_frame_setting      nrt_info;
+	struct device_node                 *of_node;
+	struct cam_subdev                   v4l2_dev_str;
+	struct cam_flash_intf_params        bridge_intf;
+	struct cam_flash_init_packet        flash_init_setting;
+	struct led_trigger                 *switch_trigger;
+	uint32_t                            flash_num_sources;
+	uint32_t                            torch_num_sources;
+	struct mutex                        flash_mutex;
+	enum   cam_flash_state              flash_state;
+	uint8_t                             flash_type;
+	bool                                is_regulator_enabled;
+	struct cam_flash_func_tbl           func_tbl;
+	struct led_trigger           *flash_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+	struct led_trigger           *torch_trigger[CAM_FLASH_MAX_LED_TRIGGERS];
+	/* I2C related settings */
+	enum   cci_i2c_master_t             cci_i2c_master;
+	struct camera_io_master             io_master_info;
+	struct i2c_data_settings            i2c_data;
+	uint32_t                            last_flush_req;
+};
+
+int cam_flash_pmic_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_i2c_pkt_parser(struct cam_flash_ctrl *fctrl, void *arg);
+int cam_flash_pmic_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_i2c_apply_setting(struct cam_flash_ctrl *fctrl, uint64_t req_id);
+int cam_flash_off(struct cam_flash_ctrl *fctrl);
+int cam_flash_pmic_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable);
+int cam_flash_i2c_power_ops(struct cam_flash_ctrl *fctrl,
+	bool regulator_enable);
+int cam_flash_i2c_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id);
+int cam_flash_pmic_flush_request(struct cam_flash_ctrl *fctrl,
+	enum cam_flash_flush_type type, uint64_t req_id);
+void cam_flash_shutdown(struct cam_flash_ctrl *fctrl);
+int cam_flash_release_dev(struct cam_flash_ctrl *fctrl);
+
+#endif /*_CAM_FLASH_DEV_H_*/
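The function table above lets the common flash code dispatch to PMIC- or
I2C-specific handlers without checking the hardware type at every call site.
A minimal sketch of how a probe path could populate it from the handlers
declared in this header (the selection helper itself is hypothetical; the
real assignments live in the probe functions):

	static void cam_flash_select_func_tbl(struct cam_flash_ctrl *fctrl)
	{
		if (fctrl->io_master_info.master_type == I2C_MASTER) {
			/* I2C-connected flash driver IC */
			fctrl->func_tbl.parser = cam_flash_i2c_pkt_parser;
			fctrl->func_tbl.apply_setting = cam_flash_i2c_apply_setting;
			fctrl->func_tbl.power_ops = cam_flash_i2c_power_ops;
			fctrl->func_tbl.flush_req = cam_flash_i2c_flush_request;
		} else {
			/* PMIC flash driven through LED triggers */
			fctrl->func_tbl.parser = cam_flash_pmic_pkt_parser;
			fctrl->func_tbl.apply_setting = cam_flash_pmic_apply_setting;
			fctrl->func_tbl.power_ops = cam_flash_pmic_power_ops;
			fctrl->func_tbl.flush_req = cam_flash_pmic_flush_request;
		}
	}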
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
new file mode 100644
index 0000000..61c8ee7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.c
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include "cam_flash_soc.h"
+#include "cam_res_mgr_api.h"
+
+static int32_t cam_get_source_node_info(
+	struct device_node *of_node,
+	struct cam_flash_ctrl *fctrl,
+	struct cam_flash_private_soc *soc_private)
+{
+	int32_t rc = 0;
+	uint32_t count = 0, i = 0;
+	struct device_node *flash_src_node = NULL;
+	struct device_node *torch_src_node = NULL;
+	struct device_node *switch_src_node = NULL;
+
+	soc_private->is_wled_flash =
+		of_property_read_bool(of_node, "wled-flash-support");
+
+	switch_src_node = of_parse_phandle(of_node, "switch-source", 0);
+	if (!switch_src_node) {
+		CAM_WARN(CAM_FLASH, "switch_src_node NULL");
+	} else {
+		rc = of_property_read_string(switch_src_node,
+			"qcom,default-led-trigger",
+			&soc_private->switch_trigger_name);
+		if (rc) {
+			CAM_ERR(CAM_FLASH,
+				"default-led-trigger read failed rc=%d", rc);
+		} else {
+			CAM_DBG(CAM_FLASH, "switch trigger %s",
+				soc_private->switch_trigger_name);
+			cam_res_mgr_led_trigger_register(
+				soc_private->switch_trigger_name,
+				&fctrl->switch_trigger);
+		}
+
+		of_node_put(switch_src_node);
+	}
+
+	if (of_get_property(of_node, "flash-source", &count)) {
+		count /= sizeof(uint32_t);
+
+		if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+			CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count);
+			return -EINVAL;
+		}
+
+		fctrl->flash_num_sources = count;
+
+		for (i = 0; i < count; i++) {
+			flash_src_node = of_parse_phandle(of_node,
+				"flash-source", i);
+			if (!flash_src_node) {
+				CAM_WARN(CAM_FLASH, "flash_src_node NULL");
+				continue;
+			}
+
+			rc = of_property_read_string(flash_src_node,
+				"qcom,default-led-trigger",
+				&soc_private->flash_trigger_name[i]);
+			if (rc) {
+				CAM_WARN(CAM_FLASH,
+				"default-led-trigger read failed rc=%d", rc);
+				of_node_put(flash_src_node);
+				continue;
+			}
+
+			CAM_DBG(CAM_FLASH, "Flash default trigger %s",
+				soc_private->flash_trigger_name[i]);
+			cam_res_mgr_led_trigger_register(
+				soc_private->flash_trigger_name[i],
+				&fctrl->flash_trigger[i]);
+
+			if (soc_private->is_wled_flash) {
+				rc = wled_flash_led_prepare(
+					fctrl->flash_trigger[i],
+					QUERY_MAX_CURRENT,
+					&soc_private->flash_max_current[i]);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"WLED FLASH max_current read fail: %d",
+						rc);
+					of_node_put(flash_src_node);
+					rc = 0;
+					continue;
+				}
+			} else {
+				rc = of_property_read_u32(flash_src_node,
+					"qcom,max-current",
+					&soc_private->flash_max_current[i]);
+				if (rc < 0) {
+					CAM_WARN(CAM_FLASH,
+					"LED FLASH max-current read fail: %d",
+						rc);
+					of_node_put(flash_src_node);
+					continue;
+				}
+			}
+
+			/* Read operational-current */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,current-ma",
+				&soc_private->flash_op_current[i]);
+			if (rc) {
+				CAM_INFO(CAM_FLASH, "op-current: read failed");
+				rc = 0;
+			}
+
+			/* Read max-duration */
+			rc = of_property_read_u32(flash_src_node,
+				"qcom,duration-ms",
+				&soc_private->flash_max_duration[i]);
+			if (rc) {
+				CAM_INFO(CAM_FLASH,
+					"max-duration prop unavailable: %d",
+					rc);
+				rc = 0;
+			}
+			of_node_put(flash_src_node);
+
+			CAM_DBG(CAM_FLASH, "MainFlashMaxCurrent[%d]: %d",
+				i, soc_private->flash_max_current[i]);
+		}
+	}
+
+	if (of_get_property(of_node, "torch-source", &count)) {
+		count /= sizeof(uint32_t);
+		if (count > CAM_FLASH_MAX_LED_TRIGGERS) {
+			CAM_ERR(CAM_FLASH, "Invalid LED count: %d", count);
+			return -EINVAL;
+		}
+
+		fctrl->torch_num_sources = count;
+
+		CAM_DBG(CAM_FLASH, "torch_num_sources = %d",
+			fctrl->torch_num_sources);
+		for (i = 0; i < count; i++) {
+			torch_src_node = of_parse_phandle(of_node,
+				"torch-source", i);
+			if (!torch_src_node) {
+				CAM_WARN(CAM_FLASH, "torch_src_node NULL");
+				continue;
+			}
+
+			rc = of_property_read_string(torch_src_node,
+				"qcom,default-led-trigger",
+				&soc_private->torch_trigger_name[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH,
+					"default-trigger read failed");
+				of_node_put(torch_src_node);
+				continue;
+			}
+
+			CAM_DBG(CAM_FLASH, "Torch default trigger %s",
+				soc_private->torch_trigger_name[i]);
+			cam_res_mgr_led_trigger_register(
+				soc_private->torch_trigger_name[i],
+				&fctrl->torch_trigger[i]);
+
+			if (soc_private->is_wled_flash) {
+				rc = wled_flash_led_prepare(
+					fctrl->torch_trigger[i],
+					QUERY_MAX_CURRENT,
+					&soc_private->torch_max_current[i]);
+				if (rc) {
+					CAM_ERR(CAM_FLASH,
+					"WLED TORCH max_current read fail: %d",
+					rc);
+					of_node_put(torch_src_node);
+					continue;
+				}
+			} else {
+				rc = of_property_read_u32(torch_src_node,
+					"qcom,max-current",
+					&soc_private->torch_max_current[i]);
+				if (rc < 0) {
+					CAM_WARN(CAM_FLASH,
+					"LED-TORCH max-current read failed: %d",
+						rc);
+					of_node_put(torch_src_node);
+					continue;
+				}
+			}
+
+			/* Read operational-current */
+			rc = of_property_read_u32(torch_src_node,
+				"qcom,current-ma",
+				&soc_private->torch_op_current[i]);
+			if (rc < 0) {
+				CAM_WARN(CAM_FLASH,
+					"op-current prop unavailable: %d", rc);
+				rc = 0;
+			}
+
+			of_node_put(torch_src_node);
+
+			CAM_DBG(CAM_FLASH, "TorchMaxCurrent[%d]: %d",
+				i, soc_private->torch_max_current[i]);
+		}
+	}
+
+	return rc;
+}
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+	struct cam_hw_soc_info *soc_info)
+{
+	int32_t rc = 0;
+	struct device_node *of_node = NULL;
+
+	if (!fctrl) {
+		CAM_ERR(CAM_FLASH, "NULL flash control structure");
+		return -EINVAL;
+	}
+
+	soc_info->soc_private =
+		kzalloc(sizeof(struct cam_flash_private_soc), GFP_KERNEL);
+	if (!soc_info->soc_private) {
+		rc = -ENOMEM;
+		goto release_soc_res;
+	}
+	of_node = fctrl->pdev->dev.of_node;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_FLASH, "Get_dt_properties failed rc %d", rc);
+		goto free_soc_private;
+	}
+
+	rc = cam_get_source_node_info(of_node, fctrl, soc_info->soc_private);
+	if (rc) {
+		CAM_ERR(CAM_FLASH,
+			"cam_get_source_node_info failed rc %d", rc);
+		goto free_soc_private;
+	}
+	return rc;
+
+free_soc_private:
+	kfree(soc_info->soc_private);
+	soc_info->soc_private = NULL;
+release_soc_res:
+	cam_soc_util_release_platform_resource(soc_info);
+	return rc;
+}
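Two DT API details in the parser above are easy to miss: of_get_property()
reports the property length in bytes through its third argument (hence the
division by sizeof(uint32_t) to count phandle cells), and every node returned
by of_parse_phandle() holds a reference that must be dropped with
of_node_put() on every exit path. A minimal sketch of the same pattern in
isolation, assuming of_node is the flash device node:

	uint32_t count = 0;
	uint32_t i;
	struct device_node *src;

	if (of_get_property(of_node, "flash-source", &count)) {
		count /= sizeof(uint32_t);	/* bytes -> number of phandles */
		for (i = 0; i < count; i++) {
			src = of_parse_phandle(of_node, "flash-source", i);
			if (!src)
				continue;
			/* ... read per-source properties here ... */
			of_node_put(src);	/* balance the reference */
		}
	}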
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
new file mode 100644
index 0000000..04c76e0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_soc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_FLASH_SOC_H_
+#define _CAM_FLASH_SOC_H_
+
+#include "cam_flash_dev.h"
+
+int cam_flash_get_dt_data(struct cam_flash_ctrl *fctrl,
+	struct cam_hw_soc_info *soc_info);
+
+#endif /*_CAM_FLASH_SOC_H_*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
new file mode 100644
index 0000000..5380910
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_ois_dev.o cam_ois_core.o cam_ois_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
new file mode 100644
index 0000000..c5a09bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_ois_core.h"
+#include "cam_ois_soc.h"
+#include "cam_sensor_util.h"
+#include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+#include "cam_common_util.h"
+
+int32_t cam_ois_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0;
+
+	power_info->power_setting_size = 1;
+	power_info->power_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_setting[0].seq_val = CAM_VAF;
+	power_info->power_setting[0].config_val = 1;
+	power_info->power_setting[0].delay = 2;
+
+	power_info->power_down_setting_size = 1;
+	power_info->power_down_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting),
+			GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		rc = -ENOMEM;
+		goto free_power_settings;
+	}
+
+	power_info->power_down_setting[0].seq_type = SENSOR_VAF;
+	power_info->power_down_setting[0].seq_val = CAM_VAF;
+	power_info->power_down_setting[0].config_val = 0;
+
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	power_info->power_setting = NULL;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
+
+/**
+ * cam_ois_get_dev_handle - get device handle
+ * @o_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int cam_ois_get_dev_handle(struct cam_ois_ctrl_t *o_ctrl,
+	void *arg)
+{
+	struct cam_sensor_acquire_dev    ois_acq_dev;
+	struct cam_create_dev_hdl        bridge_params;
+	struct cam_control              *cmd = (struct cam_control *)arg;
+
+	if (o_ctrl->bridge_intf.device_hdl != -1) {
+		CAM_ERR(CAM_OIS, "Device is already acquired");
+		return -EFAULT;
+	}
+	if (copy_from_user(&ois_acq_dev, u64_to_user_ptr(cmd->handle),
+		sizeof(ois_acq_dev)))
+		return -EFAULT;
+
+	bridge_params.session_hdl = ois_acq_dev.session_handle;
+	bridge_params.ops = &o_ctrl->bridge_intf.ops;
+	bridge_params.v4l2_sub_dev_flag = 0;
+	bridge_params.media_entity_flag = 0;
+	bridge_params.priv = o_ctrl;
+
+	ois_acq_dev.device_handle =
+		cam_create_device_hdl(&bridge_params);
+	if (ois_acq_dev.device_handle <= 0) {
+		CAM_ERR(CAM_OIS, "Can not create device handle");
+		return -EFAULT;
+	}
+	o_ctrl->bridge_intf.device_hdl = ois_acq_dev.device_handle;
+	o_ctrl->bridge_intf.session_hdl = ois_acq_dev.session_handle;
+
+	CAM_DBG(CAM_OIS, "Device Handle: %d", ois_acq_dev.device_handle);
+	if (copy_to_user(u64_to_user_ptr(cmd->handle), &ois_acq_dev,
+		sizeof(struct cam_sensor_acquire_dev))) {
+		CAM_ERR(CAM_OIS, "ACQUIRE_DEV: copy to user failed");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static int cam_ois_power_up(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int                             rc = 0;
+	struct cam_hw_soc_info          *soc_info =
+		&o_ctrl->soc_info;
+	struct cam_ois_soc_private *soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info;
+
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	if ((power_info->power_setting == NULL) &&
+		(power_info->power_down_setting == NULL)) {
+		CAM_INFO(CAM_OIS,
+			"Using default power settings");
+		rc = cam_ois_construct_default_power_setting(power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_OIS,
+				"Construct default ois power setting failed.");
+			return rc;
+		}
+	}
+
+	/* Parse and fill vreg params for power up settings */
+	rc = msm_camera_fill_vreg_params(
+		soc_info,
+		power_info->power_setting,
+		power_info->power_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_OIS,
+			"failed to fill vreg params for power up rc:%d", rc);
+		return rc;
+	}
+
+	/* Parse and fill vreg params for power down settings*/
+	rc = msm_camera_fill_vreg_params(
+		soc_info,
+		power_info->power_down_setting,
+		power_info->power_down_setting_size);
+	if (rc) {
+		CAM_ERR(CAM_OIS,
+			"failed to fill vreg params for power down rc:%d", rc);
+		return rc;
+	}
+
+	power_info->dev = soc_info->dev;
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "failed in ois power up rc %d", rc);
+		return rc;
+	}
+
+	rc = camera_io_init(&o_ctrl->io_master_info);
+	if (rc)
+		CAM_ERR(CAM_OIS, "cci_init failed: rc: %d", rc);
+
+	return rc;
+}
+
+/**
+ * cam_ois_power_down - power down OIS device
+ * @o_ctrl:     ctrl structure
+ *
+ * Returns success or failure
+ */
+static int cam_ois_power_down(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int32_t                         rc = 0;
+	struct cam_sensor_power_ctrl_t  *power_info;
+	struct cam_hw_soc_info          *soc_info =
+		&o_ctrl->soc_info;
+	struct cam_ois_soc_private *soc_private;
+
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "failed: o_ctrl %pK", o_ctrl);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	if (!soc_private) {
+		CAM_ERR(CAM_OIS, "failed: soc_private %pK", soc_private);
+		return -EINVAL;
+	}
+	power_info = &soc_private->power_info;
+
+	rc = cam_sensor_util_power_down(power_info, soc_info);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "core power down failed: %d", rc);
+		return rc;
+	}
+
+	camera_io_release(&o_ctrl->io_master_info);
+
+	return rc;
+}
+
+static int cam_ois_apply_settings(struct cam_ois_ctrl_t *o_ctrl,
+	struct i2c_settings_array *i2c_set)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0;
+	uint32_t i, size;
+
+	if (o_ctrl == NULL || i2c_set == NULL) {
+		CAM_ERR(CAM_OIS, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (i2c_set->is_settings_valid != 1) {
+		CAM_ERR(CAM_OIS, "Invalid settings");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(i2c_list,
+		&(i2c_set->list_head), list) {
+		if (i2c_list->op_code ==  CAM_SENSOR_I2C_WRITE_RANDOM) {
+			rc = camera_io_dev_write(&(o_ctrl->io_master_info),
+				&(i2c_list->i2c_settings));
+			if (rc < 0) {
+				CAM_ERR(CAM_OIS,
+					"Failed to apply i2c write settings");
+				return rc;
+			}
+		} else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
+			size = i2c_list->i2c_settings.size;
+			for (i = 0; i < size; i++) {
+				rc = camera_io_dev_poll(
+				&(o_ctrl->io_master_info),
+				i2c_list->i2c_settings.reg_setting[i].reg_addr,
+				i2c_list->i2c_settings.reg_setting[i].reg_data,
+				i2c_list->i2c_settings.reg_setting[i].data_mask,
+				i2c_list->i2c_settings.addr_type,
+				i2c_list->i2c_settings.data_type,
+				i2c_list->i2c_settings.reg_setting[i].delay);
+				if (rc < 0) {
+					CAM_ERR(CAM_OIS,
+						"i2c poll apply setting Fail");
+					return rc;
+				}
+			}
+		}
+	}
+
+	return rc;
+}
+
+static int cam_ois_slaveInfo_pkt_parser(struct cam_ois_ctrl_t *o_ctrl,
+	uint32_t *cmd_buf)
+{
+	int32_t rc = 0;
+	struct cam_cmd_ois_info *ois_info;
+
+	if (!o_ctrl || !cmd_buf) {
+		CAM_ERR(CAM_OIS, "Invalid Args");
+		return -EINVAL;
+	}
+
+	ois_info = (struct cam_cmd_ois_info *)cmd_buf;
+	if (o_ctrl->io_master_info.master_type == CCI_MASTER) {
+		o_ctrl->io_master_info.cci_client->i2c_freq_mode =
+			ois_info->i2c_freq_mode;
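+		/* slave_addr is the 8-bit address; CCI sid takes the 7-bit form */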
+		o_ctrl->io_master_info.cci_client->sid =
+			ois_info->slave_addr >> 1;
+		o_ctrl->ois_fw_flag = ois_info->ois_fw_flag;
+		o_ctrl->is_ois_calib = ois_info->is_ois_calib;
+		memcpy(o_ctrl->ois_name, ois_info->ois_name, 32);
+		o_ctrl->io_master_info.cci_client->retries = 3;
+		o_ctrl->io_master_info.cci_client->id_map = 0;
+		memcpy(&(o_ctrl->opcode), &(ois_info->opcode),
+			sizeof(struct cam_ois_opcode));
+		CAM_DBG(CAM_OIS, "Slave addr: 0x%x Freq Mode: %d",
+			ois_info->slave_addr, ois_info->i2c_freq_mode);
+	} else if (o_ctrl->io_master_info.master_type == I2C_MASTER) {
+		o_ctrl->io_master_info.client->addr = ois_info->slave_addr;
+		CAM_DBG(CAM_OIS, "Slave addr: 0x%x", ois_info->slave_addr);
+	} else {
+		CAM_ERR(CAM_OIS, "Invalid Master type : %d",
+			o_ctrl->io_master_info.master_type);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static int cam_ois_fw_download(struct cam_ois_ctrl_t *o_ctrl)
+{
+	uint16_t                           total_bytes = 0;
+	uint8_t                           *ptr = NULL;
+	int32_t                            rc = 0, cnt;
+	uint32_t                           fw_size;
+	const struct firmware             *fw = NULL;
+	const char                        *fw_name_prog = NULL;
+	const char                        *fw_name_coeff = NULL;
+	char                               name_prog[32] = {0};
+	char                               name_coeff[32] = {0};
+	struct device                     *dev = &(o_ctrl->pdev->dev);
+	struct cam_sensor_i2c_reg_setting  i2c_reg_setting;
+	struct page                       *page = NULL;
+
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "Invalid Args");
+		return -EINVAL;
+	}
+
+	snprintf(name_coeff, 32, "%s.coeff", o_ctrl->ois_name);
+
+	snprintf(name_prog, 32, "%s.prog", o_ctrl->ois_name);
+
+	/* cast pointers as const pointers */
+	fw_name_prog = name_prog;
+	fw_name_coeff = name_coeff;
+
+	/* Load FW */
+	rc = request_firmware(&fw, fw_name_prog, dev);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "Failed to locate %s", fw_name_prog);
+		return rc;
+	}
+
+	total_bytes = fw->size;
+	i2c_reg_setting.addr_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+	i2c_reg_setting.data_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+	i2c_reg_setting.size = total_bytes;
+	i2c_reg_setting.delay = 0;
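+	/* fw_size is a page count: cma_alloc()/cma_release() take pages */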
+	fw_size = PAGE_ALIGN(sizeof(struct cam_sensor_i2c_reg_array) *
+		total_bytes) >> PAGE_SHIFT;
+	page = cma_alloc(dev_get_cma_area((o_ctrl->soc_info.dev)),
+		fw_size, 0, GFP_KERNEL);
+	if (!page) {
+		CAM_ERR(CAM_OIS, "Failed in allocating i2c_array");
+		release_firmware(fw);
+		return -ENOMEM;
+	}
+
+	i2c_reg_setting.reg_setting = (struct cam_sensor_i2c_reg_array *) (
+		page_address(page));
+
+	for (cnt = 0, ptr = (uint8_t *)fw->data; cnt < total_bytes;
+		cnt++, ptr++) {
+		i2c_reg_setting.reg_setting[cnt].reg_addr =
+			o_ctrl->opcode.prog;
+		i2c_reg_setting.reg_setting[cnt].reg_data = *ptr;
+		i2c_reg_setting.reg_setting[cnt].delay = 0;
+		i2c_reg_setting.reg_setting[cnt].data_mask = 0;
+	}
+
+	rc = camera_io_dev_write_continuous(&(o_ctrl->io_master_info),
+		&i2c_reg_setting, 1);
+	if (rc < 0) {
+		CAM_ERR(CAM_OIS, "OIS FW download failed %d", rc);
+		goto release_firmware;
+	}
+	cma_release(dev_get_cma_area((o_ctrl->soc_info.dev)),
+		page, fw_size);
+	page = NULL;
+	fw_size = 0;
+	release_firmware(fw);
+
+	rc = request_firmware(&fw, fw_name_coeff, dev);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "Failed to locate %s", fw_name_coeff);
+		return rc;
+	}
+
+	total_bytes = fw->size;
+	i2c_reg_setting.addr_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+	i2c_reg_setting.data_type = CAMERA_SENSOR_I2C_TYPE_BYTE;
+	i2c_reg_setting.size = total_bytes;
+	i2c_reg_setting.delay = 0;
+	fw_size = PAGE_ALIGN(sizeof(struct cam_sensor_i2c_reg_array) *
+		total_bytes) >> PAGE_SHIFT;
+	page = cma_alloc(dev_get_cma_area((o_ctrl->soc_info.dev)),
+		fw_size, 0, GFP_KERNEL);
+	if (!page) {
+		CAM_ERR(CAM_OIS, "Failed in allocating i2c_array");
+		release_firmware(fw);
+		return -ENOMEM;
+	}
+
+	i2c_reg_setting.reg_setting = (struct cam_sensor_i2c_reg_array *) (
+		page_address(page));
+
+	for (cnt = 0, ptr = (uint8_t *)fw->data; cnt < total_bytes;
+		cnt++, ptr++) {
+		i2c_reg_setting.reg_setting[cnt].reg_addr =
+			o_ctrl->opcode.coeff;
+		i2c_reg_setting.reg_setting[cnt].reg_data = *ptr;
+		i2c_reg_setting.reg_setting[cnt].delay = 0;
+		i2c_reg_setting.reg_setting[cnt].data_mask = 0;
+	}
+
+	rc = camera_io_dev_write_continuous(&(o_ctrl->io_master_info),
+		&i2c_reg_setting, 1);
+	if (rc < 0)
+		CAM_ERR(CAM_OIS, "OIS FW download failed %d", rc);
+
+release_firmware:
+	cma_release(dev_get_cma_area((o_ctrl->soc_info.dev)),
+		page, fw_size);
+	release_firmware(fw);
+
+	return rc;
+}
+
+/**
+ * cam_ois_pkt_parse - Parse csl packet
+ * @o_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+static int cam_ois_pkt_parse(struct cam_ois_ctrl_t *o_ctrl, void *arg)
+{
+	int32_t                         rc = 0;
+	int32_t                         i = 0;
+	uint32_t                        total_cmd_buf_in_bytes = 0;
+	struct common_header           *cmm_hdr = NULL;
+	uintptr_t                       generic_ptr;
+	struct cam_control             *ioctl_ctrl = NULL;
+	struct cam_config_dev_cmd       dev_config;
+	struct i2c_settings_array      *i2c_reg_settings = NULL;
+	struct cam_cmd_buf_desc        *cmd_desc = NULL;
+	uintptr_t                       generic_pkt_addr;
+	size_t                          pkt_len;
+	struct cam_packet              *csl_packet = NULL;
+	size_t                          len_of_buff = 0;
+	uint32_t                       *offset = NULL, *cmd_buf;
+	struct cam_ois_soc_private     *soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t  *power_info = &soc_private->power_info;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+	if (copy_from_user(&dev_config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(dev_config)))
+		return -EFAULT;
+	rc = cam_mem_get_cpu_buf(dev_config.packet_handle,
+		&generic_pkt_addr, &pkt_len);
+	if (rc) {
+		CAM_ERR(CAM_OIS,
+			"Error converting command handle: %d", rc);
+		return rc;
+	}
+
+	if (dev_config.offset > pkt_len) {
+		CAM_ERR(CAM_OIS,
+			"offset is out of bound: off: %lld len: %zu",
+			dev_config.offset, pkt_len);
+		return -EINVAL;
+	}
+
+	csl_packet = (struct cam_packet *)
+		(generic_pkt_addr + (uint32_t)dev_config.offset);
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_OIS_PACKET_OPCODE_INIT:
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+		/* Loop through multiple command buffers */
+		for (i = 0; i < csl_packet->num_cmd_buf; i++) {
+			total_cmd_buf_in_bytes = cmd_desc[i].length;
+			if (!total_cmd_buf_in_bytes)
+				continue;
+
+			rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+				&generic_ptr, &len_of_buff);
+			if (rc < 0) {
+				CAM_ERR(CAM_OIS, "Failed to get cpu buf");
+				return rc;
+			}
+			cmd_buf = (uint32_t *)generic_ptr;
+			if (!cmd_buf) {
+				CAM_ERR(CAM_OIS, "invalid cmd buf");
+				return -EINVAL;
+			}
+			cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+			cmm_hdr = (struct common_header *)cmd_buf;
+
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO:
+				rc = cam_ois_slaveInfo_pkt_parser(
+					o_ctrl, cmd_buf);
+				if (rc < 0) {
+					CAM_ERR(CAM_OIS,
+					"Failed in parsing slave info");
+					return rc;
+				}
+				break;
+			case CAMERA_SENSOR_CMD_TYPE_PWR_UP:
+			case CAMERA_SENSOR_CMD_TYPE_PWR_DOWN:
+				CAM_DBG(CAM_OIS,
+					"Received power settings buffer");
+				rc = cam_sensor_update_power_settings(
+					cmd_buf,
+					total_cmd_buf_in_bytes,
+					power_info);
+				if (rc) {
+					CAM_ERR(CAM_OIS,
+					"Failed: parse power settings");
+					return rc;
+				}
+				break;
+			default:
+			if (o_ctrl->i2c_init_data.is_settings_valid == 0) {
+				CAM_DBG(CAM_OIS,
+				"Received init settings");
+				i2c_reg_settings =
+					&(o_ctrl->i2c_init_data);
+				i2c_reg_settings->is_settings_valid = 1;
+				i2c_reg_settings->request_id = 0;
+				rc = cam_sensor_i2c_command_parser(
+					&o_ctrl->io_master_info,
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_OIS,
+					"init parsing failed: %d", rc);
+					return rc;
+				}
+			} else if ((o_ctrl->is_ois_calib != 0) &&
+				(o_ctrl->i2c_calib_data.is_settings_valid ==
+				0)) {
+				CAM_DBG(CAM_OIS,
+					"Received calib settings");
+				i2c_reg_settings = &(o_ctrl->i2c_calib_data);
+				i2c_reg_settings->is_settings_valid = 1;
+				i2c_reg_settings->request_id = 0;
+				rc = cam_sensor_i2c_command_parser(
+					&o_ctrl->io_master_info,
+					i2c_reg_settings,
+					&cmd_desc[i], 1);
+				if (rc < 0) {
+					CAM_ERR(CAM_OIS,
+						"Calib parsing failed: %d", rc);
+					return rc;
+				}
+			}
+			break;
+			}
+		}
+
+		if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) {
+			rc = cam_ois_power_up(o_ctrl);
+			if (rc) {
+				CAM_ERR(CAM_OIS, "OIS Power up failed");
+				return rc;
+			}
+			o_ctrl->cam_ois_state = CAM_OIS_CONFIG;
+		}
+
+		if (o_ctrl->ois_fw_flag) {
+			rc = cam_ois_fw_download(o_ctrl);
+			if (rc) {
+				CAM_ERR(CAM_OIS, "Failed OIS FW Download");
+				goto pwr_dwn;
+			}
+		}
+
+		rc = cam_ois_apply_settings(o_ctrl, &o_ctrl->i2c_init_data);
+		if (rc < 0) {
+			CAM_ERR(CAM_OIS, "Cannot apply Init settings");
+			goto pwr_dwn;
+		}
+
+		if (o_ctrl->is_ois_calib) {
+			rc = cam_ois_apply_settings(o_ctrl,
+				&o_ctrl->i2c_calib_data);
+			if (rc) {
+				CAM_ERR(CAM_OIS, "Cannot apply calib data");
+				goto pwr_dwn;
+			}
+		}
+
+		rc = delete_request(&o_ctrl->i2c_init_data);
+		if (rc < 0) {
+			CAM_WARN(CAM_OIS,
+				"Fail deleting Init data: rc: %d", rc);
+			rc = 0;
+		}
+		rc = delete_request(&o_ctrl->i2c_calib_data);
+		if (rc < 0) {
+			CAM_WARN(CAM_OIS,
+				"Fail deleting Calibration data: rc: %d", rc);
+			rc = 0;
+		}
+		break;
+	case CAM_OIS_PACKET_OPCODE_OIS_CONTROL:
+		if (o_ctrl->cam_ois_state < CAM_OIS_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+				"Not in right state to control OIS: %d",
+				o_ctrl->cam_ois_state);
+			return rc;
+		}
+		offset = (uint32_t *)&csl_packet->payload;
+		offset += (csl_packet->cmd_buf_offset / sizeof(uint32_t));
+		cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+		i2c_reg_settings = &(o_ctrl->i2c_mode_data);
+		i2c_reg_settings->is_settings_valid = 1;
+		i2c_reg_settings->request_id = 0;
+		rc = cam_sensor_i2c_command_parser(&o_ctrl->io_master_info,
+			i2c_reg_settings,
+			cmd_desc, 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_OIS, "OIS pkt parsing failed: %d", rc);
+			return rc;
+		}
+
+		rc = cam_ois_apply_settings(o_ctrl, i2c_reg_settings);
+		if (rc < 0) {
+			CAM_ERR(CAM_OIS, "Cannot apply mode settings");
+			return rc;
+		}
+
+		rc = delete_request(i2c_reg_settings);
+		if (rc < 0)
+			CAM_ERR(CAM_OIS,
+				"Fail deleting Mode data: rc: %d", rc);
+		break;
+	default:
+		break;
+	}
+	return rc;
+pwr_dwn:
+	cam_ois_power_down(o_ctrl);
+	return rc;
+}
+
+void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int rc = 0;
+	struct cam_ois_soc_private *soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+
+	if (o_ctrl->cam_ois_state == CAM_OIS_INIT)
+		return;
+
+	if (o_ctrl->cam_ois_state >= CAM_OIS_CONFIG) {
+		rc = cam_ois_power_down(o_ctrl);
+		if (rc < 0)
+			CAM_ERR(CAM_OIS, "OIS Power down failed");
+		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
+	}
+
+	if (o_ctrl->cam_ois_state >= CAM_OIS_ACQUIRE) {
+		rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_OIS, "failed destroying the device hdl");
+		o_ctrl->bridge_intf.device_hdl = -1;
+		o_ctrl->bridge_intf.link_hdl = -1;
+		o_ctrl->bridge_intf.session_hdl = -1;
+	}
+
+	if (o_ctrl->i2c_mode_data.is_settings_valid == 1)
+		delete_request(&o_ctrl->i2c_mode_data);
+
+	if (o_ctrl->i2c_calib_data.is_settings_valid == 1)
+		delete_request(&o_ctrl->i2c_calib_data);
+
+	if (o_ctrl->i2c_init_data.is_settings_valid == 1)
+		delete_request(&o_ctrl->i2c_init_data);
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_down_setting_size = 0;
+	power_info->power_setting_size = 0;
+
+	o_ctrl->cam_ois_state = CAM_OIS_INIT;
+}
+
+/**
+ * cam_ois_driver_cmd - Handle ois cmds
+ * @o_ctrl:     ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */
+int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg)
+{
+	int                              rc = 0;
+	struct cam_ois_query_cap_t       ois_cap = {0};
+	struct cam_control              *cmd = (struct cam_control *)arg;
+	struct cam_ois_soc_private      *soc_private = NULL;
+	struct cam_sensor_power_ctrl_t  *power_info = NULL;
+
+	if (!o_ctrl || !cmd) {
+		CAM_ERR(CAM_OIS, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_OIS, "Invalid handle type: %d",
+			cmd->handle_type);
+		return -EINVAL;
+	}
+
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	mutex_lock(&(o_ctrl->ois_mutex));
+	switch (cmd->op_code) {
+	case CAM_QUERY_CAP:
+		ois_cap.slot_info = o_ctrl->soc_info.index;
+
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&ois_cap,
+			sizeof(struct cam_ois_query_cap_t))) {
+			CAM_ERR(CAM_OIS, "Failed to copy to user");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		CAM_DBG(CAM_OIS, "ois_cap: ID: %d", ois_cap.slot_info);
+		break;
+	case CAM_ACQUIRE_DEV:
+		rc = cam_ois_get_dev_handle(o_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_OIS, "Failed to acquire dev");
+			goto release_mutex;
+		}
+
+		o_ctrl->cam_ois_state = CAM_OIS_ACQUIRE;
+		break;
+	case CAM_START_DEV:
+		if (o_ctrl->cam_ois_state != CAM_OIS_CONFIG) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+			"Not in right state for start: %d",
+			o_ctrl->cam_ois_state);
+			goto release_mutex;
+		}
+		o_ctrl->cam_ois_state = CAM_OIS_START;
+		break;
+	case CAM_CONFIG_DEV:
+		rc = cam_ois_pkt_parse(o_ctrl, arg);
+		if (rc) {
+			CAM_ERR(CAM_OIS, "Failed in ois pkt Parsing");
+			goto release_mutex;
+		}
+		break;
+	case CAM_RELEASE_DEV:
+		if (o_ctrl->cam_ois_state == CAM_OIS_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+				"Can't release OIS in start state");
+			goto release_mutex;
+		}
+
+		if (o_ctrl->cam_ois_state == CAM_OIS_CONFIG) {
+			rc = cam_ois_power_down(o_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_OIS, "OIS Power down failed");
+				goto release_mutex;
+			}
+		}
+
+		if (o_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_OIS, "device hdl: %d link hdl: %d",
+				o_ctrl->bridge_intf.device_hdl,
+				o_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(o_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_OIS, "failed destroying the device hdl");
+		o_ctrl->bridge_intf.device_hdl = -1;
+		o_ctrl->bridge_intf.link_hdl = -1;
+		o_ctrl->bridge_intf.session_hdl = -1;
+		o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
+		kfree(power_info->power_setting);
+		kfree(power_info->power_down_setting);
+		power_info->power_setting = NULL;
+		power_info->power_down_setting = NULL;
+		power_info->power_down_setting_size = 0;
+		power_info->power_setting_size = 0;
+
+		if (o_ctrl->i2c_mode_data.is_settings_valid == 1)
+			delete_request(&o_ctrl->i2c_mode_data);
+
+		if (o_ctrl->i2c_calib_data.is_settings_valid == 1)
+			delete_request(&o_ctrl->i2c_calib_data);
+
+		if (o_ctrl->i2c_init_data.is_settings_valid == 1)
+			delete_request(&o_ctrl->i2c_init_data);
+
+		break;
+	case CAM_STOP_DEV:
+		if (o_ctrl->cam_ois_state != CAM_OIS_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_OIS,
+			"Not in right state for stop: %d",
+			o_ctrl->cam_ois_state);
+			goto release_mutex;
+		}
+		o_ctrl->cam_ois_state = CAM_OIS_CONFIG;
+		break;
+	default:
+		CAM_ERR(CAM_OIS, "Invalid opcode: %d", cmd->op_code);
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+release_mutex:
+	mutex_unlock(&(o_ctrl->ois_mutex));
+	return rc;
+}
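For reference, the opcode handling above amounts to a small state machine;
the sketch below summarizes the transitions enforced by cam_ois_driver_cmd()
and cam_ois_shutdown() (derived from the code, not from separate
documentation):

	/*
	 * CAM_OIS_INIT    --CAM_ACQUIRE_DEV------------> CAM_OIS_ACQUIRE
	 * CAM_OIS_ACQUIRE --CAM_CONFIG_DEV (init pkt)--> CAM_OIS_CONFIG
	 * CAM_OIS_CONFIG  --CAM_START_DEV--------------> CAM_OIS_START
	 * CAM_OIS_START   --CAM_STOP_DEV---------------> CAM_OIS_CONFIG
	 * CAM_OIS_ACQUIRE/CAM_OIS_CONFIG --CAM_RELEASE_DEV--> CAM_OIS_INIT
	 * cam_ois_shutdown() forces any state back to CAM_OIS_INIT.
	 */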
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
new file mode 100644
index 0000000..f1fcf1d
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_core.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_OIS_CORE_H_
+#define _CAM_OIS_CORE_H_
+
+#include <linux/cma.h>
+#include <linux/dma-contiguous.h>
+#include "cam_ois_dev.h"
+
+/**
+ * cam_ois_construct_default_power_setting - construct default power setting
+ * @power_info: power setting info to control the power
+ *
+ * This API constructs the default OIS power setting.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int32_t cam_ois_construct_default_power_setting(
+	struct cam_sensor_power_ctrl_t *power_info);
+
+
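+/**
+ * cam_ois_driver_cmd - handle OIS driver commands
+ * @o_ctrl:     OIS ctrl structure
+ * @arg:        Camera control command argument
+ *
+ * Returns success or failure
+ */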
+int cam_ois_driver_cmd(struct cam_ois_ctrl_t *o_ctrl, void *arg);
+
+/**
+ * cam_ois_shutdown - shutdown the OIS device
+ * @o_ctrl: OIS ctrl structure
+ *
+ * This API handles the shutdown ioctl/close
+ */
+void cam_ois_shutdown(struct cam_ois_ctrl_t *o_ctrl);
+
+#endif /* _CAM_OIS_CORE_H_ */
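A sketch of the expected calling pattern for the default power setting
helper, based on how cam_ois_power_up() and cam_ois_shutdown() use it in
cam_ois_core.c (error handling abbreviated, and soc_private assumed to be
the OIS soc private data in scope): the helper kzalloc()s both arrays, and
the caller owns and must free them:

	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
	int rc;

	if (!power_info->power_setting && !power_info->power_down_setting) {
		rc = cam_ois_construct_default_power_setting(power_info);
		if (rc < 0)
			return rc;
	}

	/* ... power up, use the device, power down ... */

	kfree(power_info->power_setting);
	kfree(power_info->power_down_setting);
	power_info->power_setting = NULL;
	power_info->power_down_setting = NULL;
	power_info->power_setting_size = 0;
	power_info->power_down_setting_size = 0;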
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
new file mode 100644
index 0000000..e4c9197
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.c
@@ -0,0 +1,416 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_ois_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_ois_soc.h"
+#include "cam_ois_core.h"
+#include "cam_debug_util.h"
+
+static long cam_ois_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int                       rc     = 0;
+	struct cam_ois_ctrl_t *o_ctrl = v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_ois_driver_cmd(o_ctrl, arg);
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+		break;
+	}
+
+	return rc;
+}
+
+static int cam_ois_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_ois_ctrl_t *o_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "o_ctrl ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(o_ctrl->ois_mutex));
+	cam_ois_shutdown(o_ctrl);
+	mutex_unlock(&(o_ctrl->ois_mutex));
+
+	return 0;
+}
+
+static int32_t cam_ois_update_i2c_info(struct cam_ois_ctrl_t *o_ctrl,
+	struct cam_ois_i2c_info_t *i2c_info)
+{
+	struct cam_sensor_cci_client        *cci_client = NULL;
+
+	if (o_ctrl->io_master_info.master_type == CCI_MASTER) {
+		cci_client = o_ctrl->io_master_info.cci_client;
+		if (!cci_client) {
+			CAM_ERR(CAM_OIS, "failed: cci_client %pK",
+				cci_client);
+			return -EINVAL;
+		}
+		cci_client->cci_i2c_master = o_ctrl->cci_i2c_master;
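+		/* slave_addr is the 8-bit address; CCI sid takes the 7-bit form */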
+		cci_client->sid = (i2c_info->slave_addr) >> 1;
+		cci_client->retries = 3;
+		cci_client->id_map = 0;
+		cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_ois_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_OIS,
+			"Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_ois_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc) {
+			CAM_ERR(CAM_OIS,
+				"Failed in ois subdev handling rc %d",
+				rc);
+			return rc;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_OIS, "Invalid compat ioctl: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_OIS,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+	return rc;
+}
+#endif
+
+static const struct v4l2_subdev_internal_ops cam_ois_internal_ops = {
+	.close = cam_ois_subdev_close,
+};
+
+static struct v4l2_subdev_core_ops cam_ois_subdev_core_ops = {
+	.ioctl = cam_ois_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_ois_init_subdev_do_ioctl,
+#endif
+};
+
+static struct v4l2_subdev_ops cam_ois_subdev_ops = {
+	.core = &cam_ois_subdev_core_ops,
+};
+
+static int cam_ois_init_subdev_param(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int rc = 0;
+
+	o_ctrl->v4l2_dev_str.internal_ops = &cam_ois_internal_ops;
+	o_ctrl->v4l2_dev_str.ops = &cam_ois_subdev_ops;
+	strlcpy(o_ctrl->device_name, CAM_OIS_NAME,
+		sizeof(o_ctrl->device_name));
+	o_ctrl->v4l2_dev_str.name = o_ctrl->device_name;
+	o_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	o_ctrl->v4l2_dev_str.ent_function = CAM_OIS_DEVICE_TYPE;
+	o_ctrl->v4l2_dev_str.token = o_ctrl;
+
+	rc = cam_register_subdev(&(o_ctrl->v4l2_dev_str));
+	if (rc)
+		CAM_ERR(CAM_OIS, "fail to create subdev");
+
+	return rc;
+}
+
+static int cam_ois_i2c_driver_probe(struct i2c_client *client,
+	 const struct i2c_device_id *id)
+{
+	int                          rc = 0;
+	struct cam_ois_ctrl_t       *o_ctrl = NULL;
+	struct cam_ois_soc_private  *soc_private = NULL;
+
+	if (client == NULL || id == NULL) {
+		CAM_ERR(CAM_OIS, "Invalid Args client: %pK id: %pK",
+			client, id);
+		return -EINVAL;
+	}
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_OIS, "i2c_check_functionality failed");
+		rc = -EIO;
+		goto probe_failure;
+	}
+
+	o_ctrl = kzalloc(sizeof(*o_ctrl), GFP_KERNEL);
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "kzalloc failed");
+		rc = -ENOMEM;
+		goto probe_failure;
+	}
+
+	i2c_set_clientdata(client, o_ctrl);
+
+	o_ctrl->soc_info.dev = &client->dev;
+	o_ctrl->soc_info.dev_name = client->name;
+	o_ctrl->ois_device_type = MSM_CAMERA_I2C_DEVICE;
+	o_ctrl->io_master_info.master_type = I2C_MASTER;
+	o_ctrl->io_master_info.client = client;
+
+	/* Mirror the platform probe init: the ioctl paths use these
+	 * regardless of device type.
+	 */
+	INIT_LIST_HEAD(&(o_ctrl->i2c_init_data.list_head));
+	INIT_LIST_HEAD(&(o_ctrl->i2c_calib_data.list_head));
+	INIT_LIST_HEAD(&(o_ctrl->i2c_mode_data.list_head));
+	mutex_init(&(o_ctrl->ois_mutex));
+	o_ctrl->bridge_intf.device_hdl = -1;
+
+	soc_private = kzalloc(sizeof(struct cam_ois_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto octrl_free;
+	}
+
+	o_ctrl->soc_info.soc_private = soc_private;
+	rc = cam_ois_driver_soc_init(o_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "failed: cam_sensor_parse_dt rc %d", rc);
+		goto soc_free;
+	}
+
+	rc = cam_ois_init_subdev_param(o_ctrl);
+	if (rc)
+		goto soc_free;
+
+	o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
+	return rc;
+
+soc_free:
+	kfree(soc_private);
+octrl_free:
+	kfree(o_ctrl);
+probe_failure:
+	return rc;
+}
+
+static int cam_ois_i2c_driver_remove(struct i2c_client *client)
+{
+	int                             i;
+	struct cam_ois_ctrl_t          *o_ctrl = i2c_get_clientdata(client);
+	struct cam_hw_soc_info         *soc_info;
+	struct cam_ois_soc_private     *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "ois device is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &o_ctrl->soc_info;
+
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	soc_private =
+		(struct cam_ois_soc_private *)soc_info->soc_private;
+	power_info = &soc_private->power_info;
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	kfree(o_ctrl->soc_info.soc_private);
+	kfree(o_ctrl);
+
+	return 0;
+}
+
+static int32_t cam_ois_platform_driver_probe(
+	struct platform_device *pdev)
+{
+	int32_t                         rc = 0;
+	struct cam_ois_ctrl_t          *o_ctrl = NULL;
+	struct cam_ois_soc_private     *soc_private = NULL;
+
+	o_ctrl = kzalloc(sizeof(struct cam_ois_ctrl_t), GFP_KERNEL);
+	if (!o_ctrl)
+		return -ENOMEM;
+
+	o_ctrl->soc_info.pdev = pdev;
+	o_ctrl->pdev = pdev;
+	o_ctrl->soc_info.dev = &pdev->dev;
+	o_ctrl->soc_info.dev_name = pdev->name;
+
+	o_ctrl->ois_device_type = MSM_CAMERA_PLATFORM_DEVICE;
+
+	o_ctrl->io_master_info.master_type = CCI_MASTER;
+	o_ctrl->io_master_info.cci_client = kzalloc(
+		sizeof(struct cam_sensor_cci_client), GFP_KERNEL);
+	if (!o_ctrl->io_master_info.cci_client) {
+		rc = -ENOMEM;
+		goto free_o_ctrl;
+	}
+
+	soc_private = kzalloc(sizeof(struct cam_ois_soc_private),
+		GFP_KERNEL);
+	if (!soc_private) {
+		rc = -ENOMEM;
+		goto free_cci_client;
+	}
+	o_ctrl->soc_info.soc_private = soc_private;
+	soc_private->power_info.dev  = &pdev->dev;
+
+	INIT_LIST_HEAD(&(o_ctrl->i2c_init_data.list_head));
+	INIT_LIST_HEAD(&(o_ctrl->i2c_calib_data.list_head));
+	INIT_LIST_HEAD(&(o_ctrl->i2c_mode_data.list_head));
+	mutex_init(&(o_ctrl->ois_mutex));
+	rc = cam_ois_driver_soc_init(o_ctrl);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "failed: soc init rc %d", rc);
+		goto free_soc;
+	}
+
+	rc = cam_ois_init_subdev_param(o_ctrl);
+	if (rc)
+		goto free_soc;
+
+	rc = cam_ois_update_i2c_info(o_ctrl, &soc_private->i2c_info);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "failed: to update i2c info rc %d", rc);
+		goto unreg_subdev;
+	}
+	o_ctrl->bridge_intf.device_hdl = -1;
+
+	platform_set_drvdata(pdev, o_ctrl);
+	v4l2_set_subdevdata(&o_ctrl->v4l2_dev_str.sd, o_ctrl);
+
+	o_ctrl->cam_ois_state = CAM_OIS_INIT;
+
+	return rc;
+unreg_subdev:
+	cam_unregister_subdev(&(o_ctrl->v4l2_dev_str));
+free_soc:
+	kfree(soc_private);
+free_cci_client:
+	kfree(o_ctrl->io_master_info.cci_client);
+free_o_ctrl:
+	kfree(o_ctrl);
+	return rc;
+}
+
+static int cam_ois_platform_driver_remove(struct platform_device *pdev)
+{
+	int                             i;
+	struct cam_ois_ctrl_t          *o_ctrl;
+	struct cam_ois_soc_private     *soc_private;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info         *soc_info;
+
+	o_ctrl = platform_get_drvdata(pdev);
+	if (!o_ctrl) {
+		CAM_ERR(CAM_OIS, "ois device is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &o_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	power_info = &soc_private->power_info;
+
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	kfree(o_ctrl->soc_info.soc_private);
+	kfree(o_ctrl->io_master_info.cci_client);
+	kfree(o_ctrl);
+	return 0;
+}
+
+static const struct of_device_id cam_ois_dt_match[] = {
+	{ .compatible = "qcom,ois" },
+	{ }
+};
+
+
+MODULE_DEVICE_TABLE(of, cam_ois_dt_match);
+
+static struct platform_driver cam_ois_platform_driver = {
+	.driver = {
+		.name = "qcom,ois",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_ois_dt_match,
+	},
+	.probe = cam_ois_platform_driver_probe,
+	.remove = cam_ois_platform_driver_remove,
+};
+static const struct i2c_device_id cam_ois_i2c_id[] = {
+	{ "msm_ois", (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_ois_i2c_driver = {
+	.id_table = cam_ois_i2c_id,
+	.probe  = cam_ois_i2c_driver_probe,
+	.remove = cam_ois_i2c_driver_remove,
+	.driver = {
+		.name = "msm_ois",
+	},
+};
+
+static struct cam_ois_registered_driver_t registered_driver = {
+	.platform_driver = false,
+	.i2c_driver = false,
+};
+
+static int __init cam_ois_driver_init(void)
+{
+	int rc = 0;
+
+	rc = platform_driver_register(&cam_ois_platform_driver);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "platform_driver_register failed rc = %d",
+			rc);
+		return rc;
+	}
+
+	registered_driver.platform_driver = 1;
+
+	rc = i2c_add_driver(&cam_ois_i2c_driver);
+	if (rc) {
+		CAM_ERR(CAM_OIS, "i2c_add_driver failed rc = %d", rc);
+		platform_driver_unregister(&cam_ois_platform_driver);
+		registered_driver.platform_driver = false;
+		return rc;
+	}
+
+	registered_driver.i2c_driver = 1;
+	return rc;
+}
+
+static void __exit cam_ois_driver_exit(void)
+{
+	if (registered_driver.platform_driver)
+		platform_driver_unregister(&cam_ois_platform_driver);
+
+	if (registered_driver.i2c_driver)
+		i2c_del_driver(&cam_ois_i2c_driver);
+}
+
+module_init(cam_ois_driver_init);
+module_exit(cam_ois_driver_exit);
+MODULE_DESCRIPTION("CAM OIS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
new file mode 100644
index 0000000..0ae48e0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_dev.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_OIS_DEV_H_
+#define _CAM_OIS_DEV_H_
+
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/cam_sensor.h>
+#include <cam_sensor_i2c.h>
+#include <cam_sensor_spi.h>
+#include <cam_sensor_io.h>
+#include <cam_cci_dev.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include <cam_subdev.h>
+#include "cam_soc_util.h"
+
+#define DEFINE_MSM_MUTEX(mutexname) \
+	static struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+enum cam_ois_state {
+	CAM_OIS_INIT,
+	CAM_OIS_ACQUIRE,
+	CAM_OIS_CONFIG,
+	CAM_OIS_START,
+};
+
+/**
+ * struct cam_ois_registered_driver_t - registered driver info
+ * @platform_driver      :   flag indicates if platform driver is registered
+ * @i2c_driver           :   flag indicates if i2c driver is registered
+ *
+ */
+struct cam_ois_registered_driver_t {
+	bool platform_driver;
+	bool i2c_driver;
+};
+
+/**
+ * struct cam_ois_i2c_info_t - I2C info
+ * @slave_addr      :   slave address
+ * @i2c_freq_mode   :   i2c frequency mode
+ *
+ */
+struct cam_ois_i2c_info_t {
+	uint16_t slave_addr;
+	uint8_t i2c_freq_mode;
+};
+
+/**
+ * struct cam_ois_soc_private - ois soc private data structure
+ * @ois_name        :   ois name
+ * @i2c_info        :   i2c info structure
+ * @power_info      :   ois power info
+ *
+ */
+struct cam_ois_soc_private {
+	const char *ois_name;
+	struct cam_ois_i2c_info_t i2c_info;
+	struct cam_sensor_power_ctrl_t power_info;
+};
+
+/**
+ * struct cam_ois_intf_params - bridge interface params
+ * @device_hdl   : Device Handle
+ * @session_hdl  : Session Handle
+ * @link_hdl     : Link Handle
+ * @ops          : KMD operations
+ * @crm_cb       : Callback API pointers
+ */
+struct cam_ois_intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_ois_ctrl_t - OIS ctrl private data
+ * @pdev            :   platform device
+ * @ois_mutex       :   ois mutex
+ * @soc_info        :   ois soc related info
+ * @io_master_info  :   Information about the communication master
+ * @cci_i2c_master  :   CCI master index
+ * @cci_num         :   CCI device index
+ * @v4l2_dev_str    :   V4L2 device structure
+ * @bridge_intf     :   bridge interface params
+ * @i2c_init_data   :   ois i2c init settings
+ * @i2c_mode_data   :   ois i2c mode settings
+ * @i2c_calib_data  :   ois i2c calib settings
+ * @ois_device_type :   ois device type
+ * @cam_ois_state   :   ois_device_state
+ * @ois_name        :   ois name
+ * @ois_fw_flag     :   flag for firmware download
+ * @is_ois_calib    :   flag for Calibration data
+ * @opcode          :   ois opcode
+ * @device_name     :   Device name
+ *
+ */
+struct cam_ois_ctrl_t {
+	struct platform_device *pdev;
+	struct mutex ois_mutex;
+	struct cam_hw_soc_info soc_info;
+	struct camera_io_master io_master_info;
+	enum cci_i2c_master_t cci_i2c_master;
+	enum cci_device_num cci_num;
+	struct cam_subdev v4l2_dev_str;
+	struct cam_ois_intf_params bridge_intf;
+	struct i2c_settings_array i2c_init_data;
+	struct i2c_settings_array i2c_calib_data;
+	struct i2c_settings_array i2c_mode_data;
+	enum msm_camera_device_type_t ois_device_type;
+	enum cam_ois_state cam_ois_state;
+	char device_name[20];
+	char ois_name[32];
+	uint8_t ois_fw_flag;
+	uint8_t is_ois_calib;
+	struct cam_ois_opcode opcode;
+};
+
+#endif /* _CAM_OIS_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
new file mode 100644
index 0000000..6e43007
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+
+#include "cam_ois_soc.h"
+#include "cam_debug_util.h"
+
+/**
+ * cam_ois_get_dt_data - parse OIS device tree data
+ * @o_ctrl: ctrl structure
+ *
+ * Parses the ois dt node
+ */
+static int cam_ois_get_dt_data(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int                             i, rc = 0;
+	struct cam_hw_soc_info         *soc_info = &o_ctrl->soc_info;
+	struct cam_ois_soc_private     *soc_private =
+		(struct cam_ois_soc_private *)o_ctrl->soc_info.soc_private;
+	struct cam_sensor_power_ctrl_t *power_info = &soc_private->power_info;
+	struct device_node             *of_node = NULL;
+
+	of_node = soc_info->dev->of_node;
+
+	if (!of_node) {
+		CAM_ERR(CAM_OIS, "of_node is NULL, device type %d",
+			o_ctrl->ois_device_type);
+		return -EINVAL;
+	}
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_OIS, "cam_soc_util_get_dt_properties rc %d",
+			rc);
+		return rc;
+	}
+
+	if (!soc_info->gpio_data) {
+		CAM_INFO(CAM_OIS, "No GPIO found");
+		return 0;
+	}
+
+	if (!soc_info->gpio_data->cam_gpio_common_tbl_size) {
+		CAM_INFO(CAM_OIS, "GPIO common table is empty");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_init_gpio_pin_tbl(soc_info,
+		&power_info->gpio_num_info);
+	if ((rc < 0) || (!power_info->gpio_num_info)) {
+		CAM_ERR(CAM_OIS, "No/Error OIS GPIOs");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+			CAM_ERR(CAM_OIS, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * cam_ois_driver_soc_init - initialize OIS SOC data
+ * @o_ctrl: ctrl structure
+ *
+ * This function is called from cam_ois_platform/i2c_driver_probe; it
+ * parses the ois dt node.
+ */
+int cam_ois_driver_soc_init(struct cam_ois_ctrl_t *o_ctrl)
+{
+	int                             rc = 0;
+	struct cam_hw_soc_info         *soc_info = &o_ctrl->soc_info;
+	struct device_node             *of_node = NULL;
+
+	if (!soc_info->dev) {
+		CAM_ERR(CAM_OIS, "soc_info is not initialized");
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+	if (!of_node) {
+		CAM_ERR(CAM_OIS, "dev.of_node NULL");
+		return -EINVAL;
+	}
+
+	if (o_ctrl->ois_device_type == MSM_CAMERA_PLATFORM_DEVICE) {
+		rc = of_property_read_u32(of_node, "cci-master",
+			&o_ctrl->cci_i2c_master);
+		if (rc < 0) {
+			CAM_DBG(CAM_OIS, "failed rc %d", rc);
+			return rc;
+		}
+
+		rc = of_property_read_u32(of_node, "cci-device",
+			&o_ctrl->cci_num);
+		CAM_DBG(CAM_ACTUATOR, "cci-device %d, rc %d",
+			o_ctrl->cci_num, rc);
+		if (rc < 0) {
+			/* Default to CCI device 0 */
+			o_ctrl->cci_num = CCI_DEVICE_0;
+			rc = 0;
+		}
+		o_ctrl->io_master_info.cci_client->cci_device = o_ctrl->cci_num;
+	}
+
+	rc = cam_ois_get_dt_data(o_ctrl);
+	if (rc < 0)
+		CAM_DBG(CAM_OIS, "failed: ois get dt data rc %d", rc);
+
+	return rc;
+}
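+
+/*
+ * A minimal illustrative devicetree fragment for the properties parsed
+ * above; the node name and values are assumptions, not a real board:
+ *
+ *	ois0: qcom,ois@0 {
+ *		cci-master = <0>;
+ *		cci-device = <1>;
+ *	};
+ *
+ * "cci-master" is required for platform devices, while "cci-device"
+ * falls back to CCI_DEVICE_0 when absent.
+ */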
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.h
new file mode 100644
index 0000000..cbffec1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_ois/cam_ois_soc.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _CAM_OIS_SOC_H_
+#define _CAM_OIS_SOC_H_
+
+#include "cam_ois_dev.h"
+
+int cam_ois_driver_soc_init(struct cam_ois_ctrl_t *o_ctrl);
+
+#endif /* _CAM_OIS_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile
new file mode 100644
index 0000000..6345d15
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_res_mgr.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
new file mode 100644
index 0000000..9f56700
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr.c
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include "cam_debug_util.h"
+#include "cam_res_mgr_api.h"
+#include "cam_res_mgr_private.h"
+
+static struct cam_res_mgr *cam_res;
+
+static void cam_res_mgr_free_res(void)
+{
+	struct cam_dev_res *dev_res, *dev_temp;
+	struct cam_gpio_res *gpio_res, *gpio_temp;
+	struct cam_flash_res *flash_res, *flash_temp;
+
+	if (!cam_res)
+		return;
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	list_for_each_entry_safe(gpio_res, gpio_temp,
+		&cam_res->gpio_res_list, list) {
+		list_for_each_entry_safe(dev_res, dev_temp,
+			&gpio_res->dev_list, list) {
+			list_del_init(&dev_res->list);
+			kfree(dev_res);
+		}
+		list_del_init(&gpio_res->list);
+		kfree(gpio_res);
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry_safe(flash_res, flash_temp,
+		&cam_res->flash_res_list, list) {
+		list_del_init(&flash_res->list);
+		kfree(flash_res);
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	mutex_lock(&cam_res->clk_res_lock);
+	cam_res->shared_clk_ref_count = 0;
+	mutex_unlock(&cam_res->clk_res_lock);
+}
+
+void cam_res_mgr_led_trigger_register(const char *name, struct led_trigger **tp)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver is not probed yet, just register the
+		 * led trigger.
+		 */
+		led_trigger_register_simple(name, tp);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (!strcmp(flash_res->name, name)) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	if (found) {
+		*tp = flash_res->trigger;
+	} else {
+		flash_res = kzalloc(sizeof(struct cam_flash_res), GFP_KERNEL);
+		if (!flash_res) {
+			CAM_ERR(CAM_RES,
+				"Failed to malloc memory for flash_res:%s",
+				name);
+			*tp = NULL;
+			return;
+		}
+
+		led_trigger_register_simple(name, tp);
+		INIT_LIST_HEAD(&flash_res->list);
+		flash_res->trigger = *tp;
+		flash_res->name = name;
+
+		mutex_lock(&cam_res->flash_res_lock);
+		list_add_tail(&flash_res->list, &cam_res->flash_res_list);
+		mutex_unlock(&cam_res->flash_res_lock);
+	}
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_register);
+
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver is not probed yet, just unregister the
+		 * led trigger.
+		 */
+		led_trigger_unregister_simple(tp);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (flash_res->trigger == tp) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found) {
+		led_trigger_unregister_simple(tp);
+		list_del_init(&flash_res->list);
+		kfree(flash_res);
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_unregister);
+
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+	enum led_brightness brightness)
+{
+	bool found = false;
+	struct cam_flash_res *flash_res;
+
+	if (!cam_res) {
+		/*
+		 * If this driver is not probed yet, just trigger
+		 * the led event.
+		 */
+		led_trigger_event(trig, brightness);
+		return;
+	}
+
+	mutex_lock(&cam_res->flash_res_lock);
+	list_for_each_entry(flash_res, &cam_res->flash_res_list, list) {
+		if (flash_res->trigger == trig) {
+			found = true;
+			break;
+		}
+	}
+	mutex_unlock(&cam_res->flash_res_lock);
+
+	if (found)
+		led_trigger_event(trig, brightness);
+}
+EXPORT_SYMBOL(cam_res_mgr_led_trigger_event);
+
+int cam_res_mgr_shared_pinctrl_init(void)
+{
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	/*
+	 * cam_res may be NULL, or shared_gpio_enabled may be false;
+	 * either means this driver is not probed or the device has no
+	 * shared gpio.
+	 */
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return 0;
+	}
+
+	if (cam_res->pstatus != PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl already been got.");
+		return 0;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	pinctrl_info->pinctrl =
+		devm_pinctrl_get(cam_res->dev);
+	if (IS_ERR_OR_NULL(pinctrl_info->pinctrl)) {
+		CAM_ERR(CAM_RES, "Pinctrl not available");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	pinctrl_info->gpio_state_active =
+		pinctrl_lookup_state(pinctrl_info->pinctrl,
+			CAM_RES_MGR_DEFAULT);
+	if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_active)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	pinctrl_info->gpio_state_suspend =
+		pinctrl_lookup_state(pinctrl_info->pinctrl,
+			CAM_RES_MGR_SLEEP);
+	if (IS_ERR_OR_NULL(pinctrl_info->gpio_state_suspend)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		cam_res->shared_gpio_enabled = false;
+		return -EINVAL;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	cam_res->pstatus = PINCTRL_STATUS_GOT;
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_init);
+
+static bool cam_res_mgr_shared_pinctrl_check_hold(void)
+{
+	int index = 0;
+	int dev_num = 0;
+	bool hold = false;
+	struct list_head *list;
+	struct cam_gpio_res *gpio_res;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	for (; index < dt->num_shared_gpio; index++) {
+		list_for_each_entry(gpio_res,
+			&cam_res->gpio_res_list, list) {
+
+			if (gpio_res->gpio ==
+				dt->shared_gpio[index]) {
+				list_for_each(list, &gpio_res->dev_list)
+					dev_num++;
+
+				if (dev_num >= 2) {
+					hold = true;
+					break;
+				}
+			}
+		}
+	}
+
+	if (cam_res->shared_clk_ref_count > 1)
+		hold = true;
+
+	return hold;
+}
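+
+/*
+ * Hold is reported when any shared gpio still has two or more devices
+ * on its dev_list, or when more than one client holds the shared clk;
+ * callers use this to defer suspending or putting the shared pinctrl.
+ */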
+
+void cam_res_mgr_shared_pinctrl_put(void)
+{
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl already been put");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return;
+	}
+
+	if (cam_res_mgr_shared_pinctrl_check_hold()) {
+		CAM_INFO(CAM_RES, "Need hold put this pinctrl");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	devm_pinctrl_put(pinctrl_info->pinctrl);
+
+	cam_res->pstatus = PINCTRL_STATUS_PUT;
+	mutex_unlock(&cam_res->gpio_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_put);
+
+int cam_res_mgr_shared_pinctrl_select_state(bool active)
+{
+	int rc = 0;
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return 0;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl alerady been put.!");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return 0;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	if (active && (cam_res->pstatus != PINCTRL_STATUS_ACTIVE)) {
+		rc = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_active);
+		cam_res->pstatus = PINCTRL_STATUS_ACTIVE;
+	} else if (!active &&
+		!cam_res_mgr_shared_pinctrl_check_hold()) {
+		rc = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_suspend);
+		cam_res->pstatus = PINCTRL_STATUS_SUSPEND;
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_select_state);
+
+int cam_res_mgr_shared_pinctrl_post_init(void)
+{
+	int ret = 0;
+	struct cam_soc_pinctrl_info *pinctrl_info;
+
+	if (!cam_res || !cam_res->shared_gpio_enabled) {
+		CAM_DBG(CAM_RES, "Not support shared gpio.");
+		return ret;
+	}
+
+	mutex_lock(&cam_res->gpio_res_lock);
+	if (cam_res->pstatus == PINCTRL_STATUS_PUT) {
+		CAM_DBG(CAM_RES, "The shared pinctrl alerady been put.!");
+		mutex_unlock(&cam_res->gpio_res_lock);
+		return ret;
+	}
+
+	pinctrl_info = &cam_res->dt.pinctrl_info;
+
+	/*
+	 * If there is no gpio resource in gpio_res_list and no shared
+	 * clk is held, this device does not actually use any shared
+	 * gpio, so suspend and put the pinctrl.
+	 */
+	if (list_empty(&cam_res->gpio_res_list) &&
+		cam_res->shared_clk_ref_count < 1) {
+		ret = pinctrl_select_state(pinctrl_info->pinctrl,
+			pinctrl_info->gpio_state_suspend);
+		devm_pinctrl_put(pinctrl_info->pinctrl);
+		cam_res->pstatus = PINCTRL_STATUS_PUT;
+	}
+	mutex_unlock(&cam_res->gpio_res_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_pinctrl_post_init);
+
+static int cam_res_mgr_add_device(struct device *dev,
+	struct cam_gpio_res *gpio_res)
+{
+	struct cam_dev_res *dev_res = NULL;
+
+	dev_res = kzalloc(sizeof(struct cam_dev_res), GFP_KERNEL);
+	if (!dev_res)
+		return -ENOMEM;
+
+	dev_res->dev = dev;
+	INIT_LIST_HEAD(&dev_res->list);
+
+	list_add_tail(&dev_res->list, &gpio_res->dev_list);
+
+	return 0;
+}
+
+static bool cam_res_mgr_gpio_is_shared(uint gpio)
+{
+	int index = 0;
+	bool found = false;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	for (; index < dt->num_shared_gpio; index++) {
+		if (gpio == dt->shared_gpio[index]) {
+			found = true;
+			break;
+		}
+	}
+
+	return found;
+}
+
+int cam_res_mgr_gpio_request(struct device *dev, uint gpio,
+		unsigned long flags, const char *label)
+{
+	int rc = 0;
+	bool found = false;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	/*
+	 * found == false covers two situations:
+	 * 1. shared gpio is not enabled
+	 * 2. shared gpio is enabled, but this gpio is not yet in
+	 *    gpio_res_list
+	 * In both cases the gpio still needs to be requested.
+	 */
+	if (!found) {
+		rc = gpio_request_one(gpio, flags, label);
+		if (rc) {
+			CAM_ERR(CAM_RES, "gpio %d:%s request fails",
+				gpio, label);
+			return rc;
+		}
+	}
+
+	/*
+	 * If the gpio is in the shared list but was not found in
+	 * gpio_res_list, insert a new cam_gpio_res into gpio_res_list.
+	 */
+	if (!found && cam_res
+		&& cam_res->shared_gpio_enabled &&
+		cam_res_mgr_gpio_is_shared(gpio)) {
+
+		gpio_res = kzalloc(sizeof(struct cam_gpio_res), GFP_KERNEL);
+		if (!gpio_res)
+			return -ENOMEM;
+
+		gpio_res->gpio = gpio;
+		gpio_res->power_on_count = 0;
+		INIT_LIST_HEAD(&gpio_res->list);
+		INIT_LIST_HEAD(&gpio_res->dev_list);
+
+		rc = cam_res_mgr_add_device(dev, gpio_res);
+		if (rc) {
+			kfree(gpio_res);
+			return rc;
+		}
+
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_add_tail(&gpio_res->list, &cam_res->gpio_res_list);
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (found && cam_res
+		&& cam_res->shared_gpio_enabled) {
+		struct cam_dev_res *dev_res = NULL;
+
+		found = false;
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(dev_res, &gpio_res->dev_list, list) {
+			if (dev_res->dev == dev) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found)
+			rc = cam_res_mgr_add_device(dev, gpio_res);
+
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_request);
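+
+/*
+ * Request flow summary: a gpio that is not yet tracked is always
+ * requested from gpiolib; if it is also listed in "shared-gpios", a
+ * cam_gpio_res node is created for it, and later requests from other
+ * devices only append those devices to its dev_list instead of
+ * re-requesting the pin.
+ */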
+
+static void cam_res_mgr_gpio_free(struct device *dev, uint gpio)
+{
+	bool found = false;
+	bool need_free = true;
+	int dev_num = 0;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (found && cam_res
+		&& cam_res->shared_gpio_enabled) {
+		struct list_head *list;
+		struct cam_dev_res *dev_res = NULL;
+
+		mutex_lock(&cam_res->gpio_res_lock);
+		/* Count the dev number in the dev_list */
+		list_for_each(list, &gpio_res->dev_list)
+			dev_num++;
+
+		/*
+		 * Free the gpio only when the last device in the
+		 * dev_list releases it; otherwise just drop this
+		 * device from the list.
+		 */
+		if (dev_num == 1) {
+			dev_res = list_first_entry(&gpio_res->dev_list,
+				struct cam_dev_res, list);
+			list_del_init(&dev_res->list);
+			kfree(dev_res);
+
+			list_del_init(&gpio_res->list);
+			kfree(gpio_res);
+		} else {
+			list_for_each_entry(dev_res,
+				&gpio_res->dev_list, list) {
+				if (dev_res->dev == dev) {
+					list_del_init(&dev_res->list);
+					kfree(dev_res);
+					need_free = false;
+					break;
+				}
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	if (need_free)
+		gpio_free(gpio);
+}
+
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+		const struct gpio *array, size_t num)
+{
+	while (num--)
+		cam_res_mgr_gpio_free(dev, (array[num]).gpio);
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_free_arry);
+
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value)
+{
+	int rc = 0;
+	bool found = false;
+	struct cam_gpio_res *gpio_res = NULL;
+
+	if (cam_res && cam_res->shared_gpio_enabled) {
+		mutex_lock(&cam_res->gpio_res_lock);
+		list_for_each_entry(gpio_res, &cam_res->gpio_res_list, list) {
+			if (gpio == gpio_res->gpio) {
+				found = true;
+				break;
+			}
+		}
+		mutex_unlock(&cam_res->gpio_res_lock);
+	}
+
+	/*
+	 * Set the value directly if the gpio is not in gpio_res_list;
+	 * otherwise apply the ref-count logic for shared gpios.
+	 */
+	if (!found) {
+		gpio_set_value_cansleep(gpio, value);
+	} else {
+		if (value) {
+			gpio_res->power_on_count++;
+			if (gpio_res->power_on_count < 2) {
+				gpio_set_value_cansleep(gpio, value);
+				CAM_DBG(CAM_RES,
+					"Shared GPIO(%d) : HIGH", gpio);
+			}
+		} else {
+			gpio_res->power_on_count--;
+			if (gpio_res->power_on_count < 1) {
+				gpio_set_value_cansleep(gpio, value);
+				CAM_DBG(CAM_RES,
+					"Shared GPIO(%d) : LOW", gpio);
+			}
+		}
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(cam_res_mgr_gpio_set_value);
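+
+/*
+ * A minimal sketch of the shared-gpio ref counting above, assuming two
+ * devices share one gpio that is listed in "shared-gpios":
+ *
+ *	cam_res_mgr_gpio_set_value(gpio, 1); // devA: count 0->1, pin HIGH
+ *	cam_res_mgr_gpio_set_value(gpio, 1); // devB: count 1->2, untouched
+ *	cam_res_mgr_gpio_set_value(gpio, 0); // devB: count 2->1, untouched
+ *	cam_res_mgr_gpio_set_value(gpio, 0); // devA: count 1->0, pin LOW
+ *
+ * Only the first power-on and the last power-off reach the hardware, so
+ * one client cannot cut power while another still needs it.
+ */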
+
+void cam_res_mgr_shared_clk_config(bool value)
+{
+	if (!cam_res)
+		return;
+
+	mutex_lock(&cam_res->clk_res_lock);
+	if (value)
+		cam_res->shared_clk_ref_count++;
+	else
+		cam_res->shared_clk_ref_count--;
+	mutex_unlock(&cam_res->clk_res_lock);
+}
+EXPORT_SYMBOL(cam_res_mgr_shared_clk_config);
+
+static int cam_res_mgr_parse_dt(struct device *dev)
+{
+	int rc = 0;
+	struct device_node *of_node = NULL;
+	struct cam_res_mgr_dt *dt = &cam_res->dt;
+
+	of_node = dev->of_node;
+
+	dt->num_shared_gpio = of_property_count_u32_elems(of_node,
+		"shared-gpios");
+
+	if (dt->num_shared_gpio > MAX_SHARED_GPIO_SIZE ||
+		dt->num_shared_gpio <= 0) {
+		/*
+		 * Not really an error: it means the dtsi does not
+		 * configure any shared gpio.
+		 */
+		CAM_DBG(CAM_RES, "Invalid GPIO number %d. No shared gpio.",
+			dt->num_shared_gpio);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "shared-gpios",
+		dt->shared_gpio, dt->num_shared_gpio);
+	if (rc) {
+		CAM_ERR(CAM_RES, "Get shared gpio array failed.");
+		return -EINVAL;
+	}
+
+	dt->pinctrl_info.pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.pinctrl)) {
+		CAM_ERR(CAM_RES, "Pinctrl not available");
+		return -EINVAL;
+	}
+
+	/*
+	 * Look up the pinctrl states to make sure shared gpio support
+	 * is actually wired up in the dtsi.
+	 */
+	dt->pinctrl_info.gpio_state_active =
+		pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+			CAM_RES_MGR_DEFAULT);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_active)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		return -EINVAL;
+	}
+
+	dt->pinctrl_info.gpio_state_suspend =
+		pinctrl_lookup_state(dt->pinctrl_info.pinctrl,
+			CAM_RES_MGR_SLEEP);
+	if (IS_ERR_OR_NULL(dt->pinctrl_info.gpio_state_suspend)) {
+		CAM_ERR(CAM_RES,
+			"Failed to get the active state pinctrl handle");
+		return -EINVAL;
+	}
+
+	devm_pinctrl_put(dt->pinctrl_info.pinctrl);
+
+	return rc;
+}
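+
+/*
+ * A minimal illustrative devicetree fragment for the binding parsed
+ * above; gpio numbers and pin-state labels are assumptions:
+ *
+ *	cam_res_mgr {
+ *		compatible = "qcom,cam-res-mgr";
+ *		shared-gpios = <21 22>;
+ *		pinctrl-names = "cam_res_mgr_default", "cam_res_mgr_suspend";
+ *		pinctrl-0 = <&cam_shared_active>;
+ *		pinctrl-1 = <&cam_shared_suspend>;
+ *	};
+ */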
+
+static int cam_res_mgr_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+
+	cam_res = kzalloc(sizeof(*cam_res), GFP_KERNEL);
+	if (!cam_res)
+		return -ENOMEM;
+
+	cam_res->dev = &pdev->dev;
+	mutex_init(&cam_res->flash_res_lock);
+	mutex_init(&cam_res->gpio_res_lock);
+	mutex_init(&cam_res->clk_res_lock);
+
+	rc = cam_res_mgr_parse_dt(&pdev->dev);
+	if (rc) {
+		CAM_INFO(CAM_RES, "Disable shared gpio support.");
+		cam_res->shared_gpio_enabled = false;
+	} else {
+		CAM_INFO(CAM_RES, "Enable shared gpio support.");
+		cam_res->shared_gpio_enabled = true;
+	}
+
+	cam_res->shared_clk_ref_count = 0;
+	cam_res->pstatus = PINCTRL_STATUS_PUT;
+
+	INIT_LIST_HEAD(&cam_res->gpio_res_list);
+	INIT_LIST_HEAD(&cam_res->flash_res_list);
+
+	return 0;
+}
+
+static int cam_res_mgr_remove(struct platform_device *pdev)
+{
+	if (cam_res) {
+		cam_res_mgr_free_res();
+		kfree(cam_res);
+		cam_res = NULL;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id cam_res_mgr_dt_match[] = {
+	{.compatible = "qcom,cam-res-mgr"},
+	{}
+};
+MODULE_DEVICE_TABLE(of, cam_res_mgr_dt_match);
+
+static struct platform_driver cam_res_mgr_driver = {
+	.probe = cam_res_mgr_probe,
+	.remove = cam_res_mgr_remove,
+	.driver = {
+		.name = "cam_res_mgr",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_res_mgr_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_res_mgr_init(void)
+{
+	return platform_driver_register(&cam_res_mgr_driver);
+}
+
+static void __exit cam_res_mgr_exit(void)
+{
+	platform_driver_unregister(&cam_res_mgr_driver);
+}
+
+module_init(cam_res_mgr_init);
+module_exit(cam_res_mgr_exit);
+MODULE_DESCRIPTION("Camera resource manager driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
new file mode 100644
index 0000000..77486f9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_api.h
@@ -0,0 +1,141 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CAM_RES_MGR_API_H__
+#define __CAM_RES_MGR_API_H__
+
+#include <linux/leds.h>
+
+/**
+ * @brief: Register the led trigger
+ *
+ *  The newly registered led trigger is added to the flash_res_list.
+ *
+ * @name  : Pointer to the led trigger name
+ * @tp    : Save the returned led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_register(const char *name,
+	struct led_trigger **tp);
+
+/**
+ * @brief: Unregister the led trigger
+ *
+ *  Free the flash_res if this led trigger isn't used by any other device.
+ *
+ * @tp : Pointer to the led trigger
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_unregister(struct led_trigger *tp);
+
+/**
+ * @brief: Trigger the event to led core
+ *
+ * @trig       : Pointer to the led trigger
+ * @brightness : The brightness need to fire
+ *
+ * @return None
+ */
+void cam_res_mgr_led_trigger_event(struct led_trigger *trig,
+	enum led_brightness brightness);
+
+/**
+ * @brief: Get the corresponding pinctrl of dev
+ *
+ *  Init the shared pinctrl if shared pinctrl is enabled.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_init(void);
+
+/**
+ * @brief: Put the pinctrl
+ *
+ *  Put the shared pinctrl.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_pinctrl_put(void);
+
+/**
+ * @brief: Select the corresponding state
+ *
+ *  The active state can be selected directly, but the suspend state is
+ *  only entered once no other device still holds the shared gpios.
+ *
+ * @active   : Whether to select the active or the suspend state of
+ *             the shared pinctrl.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_select_state(bool active);
+
+/**
+ * @brief: Post init shared pinctrl
+ *
+ *  Post init checks whether the device really uses any shared gpio,
+ *  and suspends and puts the pinctrl if it does not.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_shared_pinctrl_post_init(void);
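+
+/*
+ * A minimal sketch of the expected shared-pinctrl call order around a
+ * client power cycle (assuming the resource manager has probed; every
+ * call degrades to a no-op otherwise):
+ *
+ *	cam_res_mgr_shared_pinctrl_init();
+ *	cam_res_mgr_shared_pinctrl_select_state(true);	// active state
+ *	cam_res_mgr_shared_pinctrl_post_init();		// put if unused
+ *	...
+ *	cam_res_mgr_shared_pinctrl_select_state(false);	// suspend if free
+ *	cam_res_mgr_shared_pinctrl_put();
+ */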
+
+/**
+ * @brief: Request a gpio
+ *
+ *  Allocates a gpio_res for a new shared gpio, or finds the
+ *  corresponding gpio_res if it is already tracked.
+ *
+ * @dev   : Pointer to the device
+ * @gpio  : The GPIO number
+ * @flags : GPIO configuration as specified by GPIOF_*
+ * @label : A literal description string of this GPIO
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_res_mgr_gpio_request(struct device *dev, unsigned int gpio,
+		unsigned long flags, const char *label);
+
+/**
+ * @brief: Free an array of GPIOs
+ *
+ *  Free the GPIOs and release the corresponding gpio_res entries.
+ *
+ * @dev   : Pointer to the device
+ * @array : Array of GPIO descriptors
+ * @num   : The number of gpios
+ *
+ * @return None
+ */
+void cam_res_mgr_gpio_free_arry(struct device *dev,
+	const struct gpio *array, size_t num);
+
+/**
+ * @brief: Set GPIO power level
+ *
+ *  Add ref count support for shared GPIOs.
+ *
+ * @gpio   : The GPIO number
+ * @value  : The power level need to setup
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ * A gpio that is not tracked in gpio_res_list is simply set directly.
+ */
+int cam_res_mgr_gpio_set_value(unsigned int gpio, int value);
+
+/**
+ * @brief: Config the shared clk ref count
+ *
+ *  Adjust the shared clk ref count.
+ *
+ * @value   : true to get (increment), false to put (decrement) the
+ *            shared clk ref count.
+ *
+ * @return None
+ */
+void cam_res_mgr_shared_clk_config(bool value);
+
+#endif /* __CAM_RES_MGR_API_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
new file mode 100644
index 0000000..bc948e6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr/cam_res_mgr_private.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CAM_RES_MGR_PRIVATE_H__
+#define __CAM_RES_MGR_PRIVATE_H__
+
+#include <linux/list.h>
+#include <linux/leds.h>
+#include "cam_soc_util.h"
+
+#define MAX_SHARED_GPIO_SIZE 16
+
+/* pinctrl states name */
+#define CAM_RES_MGR_SLEEP	"cam_res_mgr_suspend"
+#define CAM_RES_MGR_DEFAULT	"cam_res_mgr_default"
+
+/**
+ * enum pinctrl_status - Enum for pinctrl status
+ */
+enum pinctrl_status {
+	PINCTRL_STATUS_GOT = 0,
+	PINCTRL_STATUS_ACTIVE,
+	PINCTRL_STATUS_SUSPEND,
+	PINCTRL_STATUS_PUT,
+};
+
+/**
+ * struct cam_dev_res
+ *
+ * @list : List member used to append this node to a dev list
+ * @dev  : Device pointer associated with device
+ */
+struct cam_dev_res {
+	struct list_head list;
+	struct device    *dev;
+};
+
+/**
+ * struct cam_gpio_res
+ *
+ * @list           : List member used to append this node to a gpio list
+ * @dev_list       : List of the devices that requested this gpio
+ * @gpio           : Gpio value
+ * @power_on_count : Record the power on times of this gpio
+ */
+struct cam_gpio_res {
+	struct list_head list;
+	struct list_head dev_list;
+	unsigned int     gpio;
+	int              power_on_count;
+};
+
+/**
+ * struct cam_flash_res
+ *
+ * @list           : List member used to append this node to a linked list
+ * @name           : Pointer to the flash trigger's name.
+ * @trigger        : Pointer to the flash trigger
+ */
+struct cam_flash_res {
+	struct list_head   list;
+	const char         *name;
+	struct led_trigger *trigger;
+};
+
+/**
+ * struct cam_res_mgr_dt
+ *
+ * @shared_gpio     : Shared gpios list in the device tree
+ * @num_shared_gpio : The number of shared gpio
+ * @pinctrl_info    : Pinctrl information
+ */
+struct cam_res_mgr_dt {
+	uint                        shared_gpio[MAX_SHARED_GPIO_SIZE];
+	int                         num_shared_gpio;
+	struct cam_soc_pinctrl_info pinctrl_info;
+};
+
+/**
+ * struct cam_res_mgr
+ *
+ * @dev                 : Pointer to the device
+ * @dt                  : Device tree resource
+ * @shared_gpio_enabled : Flag indicating whether shared gpio is supported
+ * @pstatus             : Shared pinctrl status
+ * @shared_clk_ref_count: Ref count of the shared clk
+ * @gpio_res_list       : List head of the gpio resource
+ * @flash_res_list      : List head of the flash resource
+ * @gpio_res_lock       : GPIO resource lock
+ * @flash_res_lock      : Flash resource lock
+ * @clk_res_lock        : Clk resource lock
+ */
+struct cam_res_mgr {
+	struct device         *dev;
+	struct cam_res_mgr_dt dt;
+
+	bool                  shared_gpio_enabled;
+	enum pinctrl_status   pstatus;
+
+	uint                  shared_clk_ref_count;
+
+	struct list_head      gpio_res_list;
+	struct list_head      flash_res_list;
+	struct mutex          gpio_res_lock;
+	struct mutex          flash_res_lock;
+	struct mutex          clk_res_lock;
+};
+
+#endif /* __CAM_RES_MGR_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
new file mode 100644
index 0000000..0ba5a05
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_dev.o cam_sensor_core.o cam_sensor_soc.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
new file mode 100644
index 0000000..ffa30a1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.c
@@ -0,0 +1,1247 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <cam_sensor_cmn_header.h>
+#include "cam_sensor_core.h"
+#include "cam_sensor_util.h"
+#include "cam_soc_util.h"
+#include "cam_trace.h"
+#include "cam_common_util.h"
+
+static void cam_sensor_update_req_mgr(
+	struct cam_sensor_ctrl_t *s_ctrl,
+	struct cam_packet *csl_packet)
+{
+	struct cam_req_mgr_add_request add_req;
+
+	add_req.link_hdl = s_ctrl->bridge_intf.link_hdl;
+	add_req.req_id = csl_packet->header.request_id;
+	CAM_DBG(CAM_SENSOR, " Rxed Req Id: %lld",
+		csl_packet->header.request_id);
+	add_req.dev_hdl = s_ctrl->bridge_intf.device_hdl;
+	add_req.skip_before_applying = 0;
+	if (s_ctrl->bridge_intf.crm_cb &&
+		s_ctrl->bridge_intf.crm_cb->add_req)
+		s_ctrl->bridge_intf.crm_cb->add_req(&add_req);
+
+	CAM_DBG(CAM_SENSOR, " add req to req mgr: %lld",
+			add_req.req_id);
+}
+
+static void cam_sensor_release_stream_rsc(
+	struct cam_sensor_ctrl_t *s_ctrl)
+{
+	struct i2c_settings_array *i2c_set = NULL;
+	int rc;
+
+	i2c_set = &(s_ctrl->i2c_data.streamoff_settings);
+	if (i2c_set->is_settings_valid == 1) {
+		i2c_set->is_settings_valid = -1;
+		rc = delete_request(i2c_set);
+		if (rc < 0)
+			CAM_ERR(CAM_SENSOR,
+				"failed while deleting Streamoff settings");
+	}
+
+	i2c_set = &(s_ctrl->i2c_data.streamon_settings);
+	if (i2c_set->is_settings_valid == 1) {
+		i2c_set->is_settings_valid = -1;
+		rc = delete_request(i2c_set);
+		if (rc < 0)
+			CAM_ERR(CAM_SENSOR,
+				"failed while deleting Streamon settings");
+	}
+}
+
+static void cam_sensor_release_per_frame_resource(
+	struct cam_sensor_ctrl_t *s_ctrl)
+{
+	struct i2c_settings_array *i2c_set = NULL;
+	int i, rc;
+
+	if (s_ctrl->i2c_data.per_frame != NULL) {
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
+			if (i2c_set->is_settings_valid == 1) {
+				i2c_set->is_settings_valid = -1;
+				rc = delete_request(i2c_set);
+				if (rc < 0)
+					CAM_ERR(CAM_SENSOR,
+						"delete request: %lld rc: %d",
+						i2c_set->request_id, rc);
+			}
+		}
+	}
+}
+
+static int32_t cam_sensor_i2c_pkt_parse(struct cam_sensor_ctrl_t *s_ctrl,
+	void *arg)
+{
+	int32_t rc = 0;
+	uintptr_t generic_ptr;
+	struct cam_control *ioctl_ctrl = NULL;
+	struct cam_packet *csl_packet = NULL;
+	struct cam_cmd_buf_desc *cmd_desc = NULL;
+	struct i2c_settings_array *i2c_reg_settings = NULL;
+	size_t len_of_buff = 0;
+	uint32_t *offset = NULL;
+	struct cam_config_dev_cmd config;
+	struct i2c_data_settings *i2c_data = NULL;
+
+	ioctl_ctrl = (struct cam_control *)arg;
+
+	if (ioctl_ctrl->handle_type != CAM_HANDLE_USER_POINTER) {
+		CAM_ERR(CAM_SENSOR, "Invalid Handle Type");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&config,
+		u64_to_user_ptr(ioctl_ctrl->handle),
+		sizeof(config)))
+		return -EFAULT;
+
+	rc = cam_mem_get_cpu_buf(
+		config.packet_handle,
+		&generic_ptr,
+		&len_of_buff);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Failed in getting the buffer: %d", rc);
+		return rc;
+	}
+
+	if (config.offset > len_of_buff) {
+		CAM_ERR(CAM_SENSOR,
+			"offset is out of bounds: off: %lld len: %zu",
+			config.offset, len_of_buff);
+		return -EINVAL;
+	}
+
+	csl_packet = (struct cam_packet *)(generic_ptr +
+		(uint32_t)config.offset);
+
+	if ((csl_packet->header.op_code & 0xFFFFFF) !=
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG &&
+		csl_packet->header.request_id <= s_ctrl->last_flush_req
+		&& s_ctrl->last_flush_req != 0) {
+		CAM_DBG(CAM_SENSOR,
+			"reject request %lld, last request to flush %lld",
+			csl_packet->header.request_id, s_ctrl->last_flush_req);
+		return -EINVAL;
+	}
+
+	if (csl_packet->header.request_id > s_ctrl->last_flush_req)
+		s_ctrl->last_flush_req = 0;
+
+	i2c_data = &(s_ctrl->i2c_data);
+	CAM_DBG(CAM_SENSOR, "Header OpCode: %d", csl_packet->header.op_code);
+	switch (csl_packet->header.op_code & 0xFFFFFF) {
+	case CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG: {
+		i2c_reg_settings = &i2c_data->init_settings;
+		i2c_reg_settings->request_id = 0;
+		i2c_reg_settings->is_settings_valid = 1;
+		break;
+	}
+	case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: {
+		i2c_reg_settings = &i2c_data->config_settings;
+		i2c_reg_settings->request_id = 0;
+		i2c_reg_settings->is_settings_valid = 1;
+		break;
+	}
+	case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON: {
+		if (s_ctrl->streamon_count > 0)
+			return 0;
+
+		s_ctrl->streamon_count = s_ctrl->streamon_count + 1;
+		i2c_reg_settings = &i2c_data->streamon_settings;
+		i2c_reg_settings->request_id = 0;
+		i2c_reg_settings->is_settings_valid = 1;
+		break;
+	}
+	case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF: {
+		if (s_ctrl->streamoff_count > 0)
+			return 0;
+
+		s_ctrl->streamoff_count = s_ctrl->streamoff_count + 1;
+		i2c_reg_settings = &i2c_data->streamoff_settings;
+		i2c_reg_settings->request_id = 0;
+		i2c_reg_settings->is_settings_valid = 1;
+		break;
+	}
+
+	case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE: {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+			CAM_WARN(CAM_SENSOR,
+				"Rxed Update packets without linking");
+			return 0;
+		}
+
+		i2c_reg_settings =
+			&i2c_data->per_frame[csl_packet->header.request_id %
+				MAX_PER_FRAME_ARRAY];
+		CAM_DBG(CAM_SENSOR, "Received Packet: %lld req: %lld",
+			csl_packet->header.request_id % MAX_PER_FRAME_ARRAY,
+			csl_packet->header.request_id);
+		if (i2c_reg_settings->is_settings_valid == 1) {
+			CAM_ERR(CAM_SENSOR,
+				"Already some pkt in offset req : %lld",
+				csl_packet->header.request_id);
+			/*
+			 * Update req mgr even in case of failure.
+			 * This will help not to wait indefinitely
+			 * and freeze. If this log is triggered then
+			 * fix it.
+			 */
+			cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+			return 0;
+		}
+		break;
+	}
+	case CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP: {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_ACQUIRE)) {
+			CAM_WARN(CAM_SENSOR,
+				"Rxed NOP packets without linking");
+			return 0;
+		}
+
+		cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+		return 0;
+	}
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid Packet Header");
+		return -EINVAL;
+	}
+
+	offset = (uint32_t *)&csl_packet->payload;
+	offset += csl_packet->cmd_buf_offset / 4;
+	cmd_desc = (struct cam_cmd_buf_desc *)(offset);
+
+	rc = cam_sensor_i2c_command_parser(&s_ctrl->io_master_info,
+			i2c_reg_settings, cmd_desc, 1);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Fail parsing I2C Pkt: %d", rc);
+		return rc;
+	}
+
+	if ((csl_packet->header.op_code & 0xFFFFFF) ==
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE) {
+		i2c_reg_settings->request_id =
+			csl_packet->header.request_id;
+		cam_sensor_update_req_mgr(s_ctrl, csl_packet);
+	}
+
+	return rc;
+}
+
+static int32_t cam_sensor_i2c_modes_util(
+	struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list)
+{
+	int32_t rc = 0;
+	uint32_t i, size;
+
+	if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_RANDOM) {
+		rc = camera_io_dev_write(io_master_info,
+			&(i2c_list->i2c_settings));
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to random write I2C settings: %d",
+				rc);
+			return rc;
+		}
+	} else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_SEQ) {
+		rc = camera_io_dev_write_continuous(
+			io_master_info,
+			&(i2c_list->i2c_settings),
+			0);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to seq write I2C settings: %d",
+				rc);
+			return rc;
+		}
+	} else if (i2c_list->op_code == CAM_SENSOR_I2C_WRITE_BURST) {
+		rc = camera_io_dev_write_continuous(
+			io_master_info,
+			&(i2c_list->i2c_settings),
+			1);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to burst write I2C settings: %d",
+				rc);
+			return rc;
+		}
+	} else if (i2c_list->op_code == CAM_SENSOR_I2C_POLL) {
+		size = i2c_list->i2c_settings.size;
+		for (i = 0; i < size; i++) {
+			rc = camera_io_dev_poll(
+			io_master_info,
+			i2c_list->i2c_settings.reg_setting[i].reg_addr,
+			i2c_list->i2c_settings.reg_setting[i].reg_data,
+			i2c_list->i2c_settings.reg_setting[i].data_mask,
+			i2c_list->i2c_settings.addr_type,
+			i2c_list->i2c_settings.data_type,
+			i2c_list->i2c_settings.reg_setting[i].delay);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"i2c poll apply setting Fail: %d", rc);
+				return rc;
+			}
+		}
+	}
+
+	return rc;
+}
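+
+/*
+ * Summary of the op-code dispatch above:
+ *	CAM_SENSOR_I2C_WRITE_RANDOM -> camera_io_dev_write()
+ *	CAM_SENSOR_I2C_WRITE_SEQ    -> camera_io_dev_write_continuous(.., 0)
+ *	CAM_SENSOR_I2C_WRITE_BURST  -> camera_io_dev_write_continuous(.., 1)
+ *	CAM_SENSOR_I2C_POLL         -> camera_io_dev_poll() per register
+ * Any other op-code falls through and returns 0.
+ */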
+
+int32_t cam_sensor_update_i2c_info(struct cam_cmd_i2c_info *i2c_info,
+	struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+	struct cam_sensor_cci_client   *cci_client = NULL;
+
+	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+		cci_client = s_ctrl->io_master_info.cci_client;
+		if (!cci_client) {
+			CAM_ERR(CAM_SENSOR, "failed: cci_client %pK",
+				cci_client);
+			return -EINVAL;
+		}
+		cci_client->cci_i2c_master = s_ctrl->cci_i2c_master;
+		cci_client->sid = i2c_info->slave_addr >> 1;
+		cci_client->retries = 3;
+		cci_client->id_map = 0;
+		cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+		CAM_DBG(CAM_SENSOR, " Master: %d sid: %d freq_mode: %d",
+			cci_client->cci_i2c_master, i2c_info->slave_addr,
+			i2c_info->i2c_freq_mode);
+	}
+
+	s_ctrl->sensordata->slave_info.sensor_slave_addr =
+		i2c_info->slave_addr;
+	return rc;
+}
+
+int32_t cam_sensor_update_slave_info(struct cam_cmd_probe *probe_info,
+	struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+
+	s_ctrl->sensordata->slave_info.sensor_id_reg_addr =
+		probe_info->reg_addr;
+	s_ctrl->sensordata->slave_info.sensor_id =
+		probe_info->expected_data;
+	s_ctrl->sensordata->slave_info.sensor_id_mask =
+		probe_info->data_mask;
+	/* Userspace passes the pipeline delay in reserved field */
+	s_ctrl->pipeline_delay =
+		probe_info->reserved;
+
+	s_ctrl->sensor_probe_addr_type =  probe_info->addr_type;
+	s_ctrl->sensor_probe_data_type =  probe_info->data_type;
+	CAM_DBG(CAM_SENSOR,
+		"Sensor Addr: 0x%x sensor_id: 0x%x sensor_mask: 0x%x sensor_pipeline_delay:0x%x",
+		s_ctrl->sensordata->slave_info.sensor_id_reg_addr,
+		s_ctrl->sensordata->slave_info.sensor_id,
+		s_ctrl->sensordata->slave_info.sensor_id_mask,
+		s_ctrl->pipeline_delay);
+	return rc;
+}
+
+int32_t cam_handle_cmd_buffers_for_probe(void *cmd_buf,
+	struct cam_sensor_ctrl_t *s_ctrl,
+	int32_t cmd_buf_num, int cmd_buf_length)
+{
+	int32_t rc = 0;
+
+	switch (cmd_buf_num) {
+	case 0: {
+		struct cam_cmd_i2c_info *i2c_info = NULL;
+		struct cam_cmd_probe *probe_info;
+
+		i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+		rc = cam_sensor_update_i2c_info(i2c_info, s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Failed in Updating the i2c Info");
+			return rc;
+		}
+		probe_info = (struct cam_cmd_probe *)
+			(cmd_buf + sizeof(struct cam_cmd_i2c_info));
+		rc = cam_sensor_update_slave_info(probe_info, s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Updating the slave Info");
+			return rc;
+		}
+		cmd_buf = probe_info;
+	}
+		break;
+	case 1: {
+		rc = cam_sensor_update_power_settings(cmd_buf,
+			cmd_buf_length, &s_ctrl->sensordata->power_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed in updating power settings");
+			return rc;
+		}
+	}
+		break;
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid command buffer");
+		break;
+	}
+	return rc;
+}
+
+int32_t cam_handle_mem_ptr(uint64_t handle, struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0, i;
+	uint32_t *cmd_buf;
+	void *ptr;
+	size_t len;
+	struct cam_packet *pkt;
+	struct cam_cmd_buf_desc *cmd_desc;
+	uintptr_t cmd_buf1 = 0;
+	uintptr_t packet = 0;
+
+	rc = cam_mem_get_cpu_buf(handle,
+		&packet, &len);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Failed to get the command Buffer");
+		return -EINVAL;
+	}
+	pkt = (struct cam_packet *)packet;
+	cmd_desc = (struct cam_cmd_buf_desc *)
+		((uint32_t *)&pkt->payload + pkt->cmd_buf_offset/4);
+	if (cmd_desc == NULL) {
+		CAM_ERR(CAM_SENSOR, "command descriptor pos is invalid");
+		return -EINVAL;
+	}
+	if (pkt->num_cmd_buf != 2) {
+		CAM_ERR(CAM_SENSOR, "Expected More Command Buffers : %d",
+			 pkt->num_cmd_buf);
+		return -EINVAL;
+	}
+	for (i = 0; i < pkt->num_cmd_buf; i++) {
+		if (!(cmd_desc[i].length))
+			continue;
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			&cmd_buf1, &len);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to parse the command Buffer Header");
+			return -EINVAL;
+		}
+		cmd_buf = (uint32_t *)cmd_buf1;
+		cmd_buf += cmd_desc[i].offset/4;
+		ptr = (void *) cmd_buf;
+
+		rc = cam_handle_cmd_buffers_for_probe(ptr, s_ctrl,
+			i, cmd_desc[i].length);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to parse the command Buffer Header");
+			return -EINVAL;
+		}
+	}
+	return rc;
+}
+
+void cam_sensor_query_cap(struct cam_sensor_ctrl_t *s_ctrl,
+	struct  cam_sensor_query_cap *query_cap)
+{
+	query_cap->pos_roll = s_ctrl->sensordata->pos_roll;
+	query_cap->pos_pitch = s_ctrl->sensordata->pos_pitch;
+	query_cap->pos_yaw = s_ctrl->sensordata->pos_yaw;
+	query_cap->secure_camera = 0;
+	query_cap->actuator_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_ACTUATOR];
+	query_cap->csiphy_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_CSIPHY];
+	query_cap->eeprom_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_EEPROM];
+	query_cap->flash_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_LED_FLASH];
+	query_cap->ois_slot_id =
+		s_ctrl->sensordata->subdev_id[SUB_MODULE_OIS];
+	query_cap->slot_info =
+		s_ctrl->soc_info.index;
+}
+
+static uint16_t cam_sensor_id_by_mask(struct cam_sensor_ctrl_t *s_ctrl,
+	uint32_t chipid)
+{
+	uint16_t sensor_id = (uint16_t)(chipid & 0xFFFF);
+	int16_t sensor_id_mask = s_ctrl->sensordata->slave_info.sensor_id_mask;
+
+	if (!sensor_id_mask)
+		sensor_id_mask = ~sensor_id_mask;
+
+	sensor_id &= sensor_id_mask;
+	sensor_id_mask &= -sensor_id_mask;
+	sensor_id_mask -= 1;
+	while (sensor_id_mask) {
+		sensor_id_mask >>= 1;
+		sensor_id >>= 1;
+	}
+	return sensor_id;
+}
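+
+/*
+ * Worked example for the mask logic above (values are illustrative):
+ * with sensor_id_mask = 0x0FF0 and chipid = 0x2345, the masked value is
+ * 0x0340; the lowest set mask bit is 0x0010, so both are shifted right
+ * four times and the function returns 0x34. A mask of zero is treated
+ * as matching the full 16-bit id.
+ */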
+
+void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	struct cam_sensor_power_ctrl_t *power_info =
+		&s_ctrl->sensordata->power_info;
+	int rc = 0;
+
+	if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) &&
+		(s_ctrl->is_probe_succeed == 0))
+		return;
+
+	cam_sensor_release_stream_rsc(s_ctrl);
+	cam_sensor_release_per_frame_resource(s_ctrl);
+	cam_sensor_power_down(s_ctrl);
+
+	rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, " failed destroying dhdl");
+	s_ctrl->bridge_intf.device_hdl = -1;
+	s_ctrl->bridge_intf.link_hdl = -1;
+	s_ctrl->bridge_intf.session_hdl = -1;
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_setting_size = 0;
+	power_info->power_down_setting_size = 0;
+	s_ctrl->streamon_count = 0;
+	s_ctrl->streamoff_count = 0;
+	s_ctrl->is_probe_succeed = 0;
+	s_ctrl->last_flush_req = 0;
+	s_ctrl->sensor_state = CAM_SENSOR_INIT;
+}
+
+int cam_sensor_match_id(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0;
+	uint32_t chipid = 0;
+	struct cam_camera_slave_info *slave_info;
+
+	slave_info = &(s_ctrl->sensordata->slave_info);
+
+	if (!slave_info) {
+		CAM_ERR(CAM_SENSOR, " failed: %pK",
+			 slave_info);
+		return -EINVAL;
+	}
+
+	rc = camera_io_dev_read(
+		&(s_ctrl->io_master_info),
+		slave_info->sensor_id_reg_addr,
+		&chipid, CAMERA_SENSOR_I2C_TYPE_WORD,
+		CAMERA_SENSOR_I2C_TYPE_WORD);
+
+	CAM_DBG(CAM_SENSOR, "read id: 0x%x expected id 0x%x:",
+			 chipid, slave_info->sensor_id);
+	if (cam_sensor_id_by_mask(s_ctrl, chipid) != slave_info->sensor_id) {
+		CAM_ERR(CAM_SENSOR, "chip id %x does not match %x",
+				chipid, slave_info->sensor_id);
+		return -ENODEV;
+	}
+	return rc;
+}
+
+int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl,
+	void *arg)
+{
+	int rc = 0;
+	struct cam_control *cmd = NULL;
+	struct cam_sensor_power_ctrl_t *power_info = NULL;
+
+	if (!s_ctrl || !arg) {
+		CAM_ERR(CAM_SENSOR, "s_ctrl or arg is NULL");
+		return -EINVAL;
+	}
+
+	cmd = (struct cam_control *)arg;
+	power_info = &s_ctrl->sensordata->power_info;
+
+	if (cmd->op_code != CAM_SENSOR_PROBE_CMD) {
+		if (cmd->handle_type != CAM_HANDLE_USER_POINTER) {
+			CAM_ERR(CAM_SENSOR, "Invalid handle type: %d",
+				cmd->handle_type);
+			return -EINVAL;
+		}
+	}
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	switch (cmd->op_code) {
+	case CAM_SENSOR_PROBE_CMD: {
+		if (s_ctrl->is_probe_succeed == 1) {
+			CAM_ERR(CAM_SENSOR,
+				"Already Sensor Probed in the slot");
+			break;
+		}
+
+		if (cmd->handle_type ==
+			CAM_HANDLE_MEM_HANDLE) {
+			rc = cam_handle_mem_ptr(cmd->handle, s_ctrl);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR, "Get Buffer Handle Failed");
+				goto release_mutex;
+			}
+		} else {
+			CAM_ERR(CAM_SENSOR, "Invalid Command Type: %d",
+				 cmd->handle_type);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		/* Parse and fill vreg params for powerup settings */
+		rc = msm_camera_fill_vreg_params(
+			&s_ctrl->soc_info,
+			s_ctrl->sensordata->power_info.power_setting,
+			s_ctrl->sensordata->power_info.power_setting_size);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Fail in filling vreg params for PUP rc %d",
+				 rc);
+			goto free_power_settings;
+		}
+
+		/* Parse and fill vreg params for powerdown settings*/
+		rc = msm_camera_fill_vreg_params(
+			&s_ctrl->soc_info,
+			s_ctrl->sensordata->power_info.power_down_setting,
+			s_ctrl->sensordata->power_info.power_down_setting_size);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Fail in filling vreg params for PDOWN rc %d",
+				 rc);
+			goto free_power_settings;
+		}
+
+		/* Power up and probe sensor */
+		rc = cam_sensor_power_up(s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "power up failed");
+			goto free_power_settings;
+		}
+
+		/* Match sensor ID */
+		rc = cam_sensor_match_id(s_ctrl);
+		if (rc < 0) {
+			cam_sensor_power_down(s_ctrl);
+			msleep(20);
+			goto free_power_settings;
+		}
+
+		CAM_INFO(CAM_SENSOR,
+			"Probe success,slot:%d,slave_addr:0x%x,sensor_id:0x%x",
+			s_ctrl->soc_info.index,
+			s_ctrl->sensordata->slave_info.sensor_slave_addr,
+			s_ctrl->sensordata->slave_info.sensor_id);
+
+		rc = cam_sensor_power_down(s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "fail in Sensor Power Down");
+			goto free_power_settings;
+		}
+		/*
+		 * Set probe succeeded flag to 1 so that no other camera shall
+		 * probed on this slot
+		 */
+		s_ctrl->is_probe_succeed = 1;
+		s_ctrl->sensor_state = CAM_SENSOR_INIT;
+	}
+		break;
+	case CAM_ACQUIRE_DEV: {
+		struct cam_sensor_acquire_dev sensor_acq_dev;
+		struct cam_create_dev_hdl bridge_params;
+
+		if ((s_ctrl->is_probe_succeed == 0) ||
+			(s_ctrl->sensor_state != CAM_SENSOR_INIT)) {
+			CAM_WARN(CAM_SENSOR,
+				"Not in right state to aquire %d",
+				s_ctrl->sensor_state);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+
+		if (s_ctrl->bridge_intf.device_hdl != -1) {
+			CAM_ERR(CAM_SENSOR, "Device is already acquired");
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = copy_from_user(&sensor_acq_dev,
+			u64_to_user_ptr(cmd->handle),
+			sizeof(sensor_acq_dev));
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Failed Copying from user");
+			goto release_mutex;
+		}
+
+		bridge_params.session_hdl = sensor_acq_dev.session_handle;
+		bridge_params.ops = &s_ctrl->bridge_intf.ops;
+		bridge_params.v4l2_sub_dev_flag = 0;
+		bridge_params.media_entity_flag = 0;
+		bridge_params.priv = s_ctrl;
+
+		sensor_acq_dev.device_handle =
+			cam_create_device_hdl(&bridge_params);
+		s_ctrl->bridge_intf.device_hdl = sensor_acq_dev.device_handle;
+		s_ctrl->bridge_intf.session_hdl = sensor_acq_dev.session_handle;
+
+		CAM_DBG(CAM_SENSOR, "Device Handle: %d",
+			sensor_acq_dev.device_handle);
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&sensor_acq_dev,
+			sizeof(struct cam_sensor_acquire_dev))) {
+			CAM_ERR(CAM_SENSOR, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+
+		rc = cam_sensor_power_up(s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Sensor Power up failed");
+			goto release_mutex;
+		}
+
+		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
+		s_ctrl->last_flush_req = 0;
+		CAM_INFO(CAM_SENSOR,
+			"CAM_ACQUIRE_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+			s_ctrl->sensordata->slave_info.sensor_id,
+			s_ctrl->sensordata->slave_info.sensor_slave_addr);
+	}
+		break;
+	case CAM_RELEASE_DEV: {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_SENSOR,
+			"Not in right state to release : %d",
+			s_ctrl->sensor_state);
+			goto release_mutex;
+		}
+
+		if (s_ctrl->bridge_intf.link_hdl != -1) {
+			CAM_ERR(CAM_SENSOR,
+				"Device [%d] still active on link 0x%x",
+				s_ctrl->sensor_state,
+				s_ctrl->bridge_intf.link_hdl);
+			rc = -EAGAIN;
+			goto release_mutex;
+		}
+
+		rc = cam_sensor_power_down(s_ctrl);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Sensor Power Down failed");
+			goto release_mutex;
+		}
+
+		cam_sensor_release_per_frame_resource(s_ctrl);
+		cam_sensor_release_stream_rsc(s_ctrl);
+		if (s_ctrl->bridge_intf.device_hdl == -1) {
+			CAM_ERR(CAM_SENSOR,
+				"Invalid Handles: link hdl: %d device hdl: %d",
+				s_ctrl->bridge_intf.device_hdl,
+				s_ctrl->bridge_intf.link_hdl);
+			rc = -EINVAL;
+			goto release_mutex;
+		}
+		rc = cam_destroy_device_hdl(s_ctrl->bridge_intf.device_hdl);
+		if (rc < 0)
+			CAM_ERR(CAM_SENSOR,
+				"failed in destroying the device hdl");
+		s_ctrl->bridge_intf.device_hdl = -1;
+		s_ctrl->bridge_intf.link_hdl = -1;
+		s_ctrl->bridge_intf.session_hdl = -1;
+
+		s_ctrl->sensor_state = CAM_SENSOR_INIT;
+		CAM_INFO(CAM_SENSOR,
+			"CAM_RELEASE_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+			s_ctrl->sensordata->slave_info.sensor_id,
+			s_ctrl->sensordata->slave_info.sensor_slave_addr);
+		s_ctrl->streamon_count = 0;
+		s_ctrl->streamoff_count = 0;
+		s_ctrl->last_flush_req = 0;
+	}
+		break;
+	case CAM_QUERY_CAP: {
+		struct  cam_sensor_query_cap sensor_cap;
+
+		cam_sensor_query_cap(s_ctrl, &sensor_cap);
+		if (copy_to_user(u64_to_user_ptr(cmd->handle),
+			&sensor_cap, sizeof(struct  cam_sensor_query_cap))) {
+			CAM_ERR(CAM_SENSOR, "Failed Copy to User");
+			rc = -EFAULT;
+			goto release_mutex;
+		}
+		break;
+	}
+	case CAM_START_DEV: {
+		if ((s_ctrl->sensor_state == CAM_SENSOR_INIT) ||
+			(s_ctrl->sensor_state == CAM_SENSOR_START)) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_SENSOR,
+			"Not in right state to start : %d",
+			s_ctrl->sensor_state);
+			goto release_mutex;
+		}
+
+		if (s_ctrl->i2c_data.streamon_settings.is_settings_valid &&
+			(s_ctrl->i2c_data.streamon_settings.request_id == 0)) {
+			rc = cam_sensor_apply_settings(s_ctrl, 0,
+				CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"cannot apply streamon settings");
+				goto release_mutex;
+			}
+		}
+		s_ctrl->sensor_state = CAM_SENSOR_START;
+		CAM_INFO(CAM_SENSOR,
+			"CAM_START_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+			s_ctrl->sensordata->slave_info.sensor_id,
+			s_ctrl->sensordata->slave_info.sensor_slave_addr);
+	}
+		break;
+	case CAM_STOP_DEV: {
+		if (s_ctrl->sensor_state != CAM_SENSOR_START) {
+			rc = -EINVAL;
+			CAM_WARN(CAM_SENSOR,
+			"Not in right state to stop : %d",
+			s_ctrl->sensor_state);
+			goto release_mutex;
+		}
+
+		if (s_ctrl->i2c_data.streamoff_settings.is_settings_valid &&
+			(s_ctrl->i2c_data.streamoff_settings.request_id == 0)) {
+			rc = cam_sensor_apply_settings(s_ctrl, 0,
+				CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+				"cannot apply streamoff settings");
+			}
+		}
+
+		cam_sensor_release_per_frame_resource(s_ctrl);
+		s_ctrl->last_flush_req = 0;
+		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
+		CAM_INFO(CAM_SENSOR,
+			"CAM_STOP_DEV Success, sensor_id:0x%x,sensor_slave_addr:0x%x",
+			s_ctrl->sensordata->slave_info.sensor_id,
+			s_ctrl->sensordata->slave_info.sensor_slave_addr);
+	}
+		break;
+	case CAM_CONFIG_DEV: {
+		rc = cam_sensor_i2c_pkt_parse(s_ctrl, arg);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "Failed CCI Config: %d", rc);
+			goto release_mutex;
+		}
+		if (s_ctrl->i2c_data.init_settings.is_settings_valid &&
+			(s_ctrl->i2c_data.init_settings.request_id == 0)) {
+
+			rc = cam_sensor_apply_settings(s_ctrl, 0,
+				CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"cannot apply init settings");
+				goto release_mutex;
+			}
+			rc = delete_request(&s_ctrl->i2c_data.init_settings);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"Fail in deleting the Init settings");
+				goto release_mutex;
+			}
+			s_ctrl->i2c_data.init_settings.request_id = -1;
+		}
+
+		if (s_ctrl->i2c_data.config_settings.is_settings_valid &&
+			(s_ctrl->i2c_data.config_settings.request_id == 0)) {
+			rc = cam_sensor_apply_settings(s_ctrl, 0,
+				CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"cannot apply config settings");
+				goto release_mutex;
+			}
+			rc = delete_request(&s_ctrl->i2c_data.config_settings);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"Fail in deleting the config settings");
+				goto release_mutex;
+			}
+			s_ctrl->sensor_state = CAM_SENSOR_CONFIG;
+			s_ctrl->i2c_data.config_settings.request_id = -1;
+		}
+	}
+		break;
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid Opcode: %d", cmd->op_code);
+		rc = -EINVAL;
+		goto release_mutex;
+	}
+
+release_mutex:
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+	return rc;
+
+free_power_settings:
+	kfree(power_info->power_setting);
+	kfree(power_info->power_down_setting);
+	power_info->power_setting = NULL;
+	power_info->power_down_setting = NULL;
+	power_info->power_down_setting_size = 0;
+	power_info->power_setting_size = 0;
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+	return rc;
+}
+
+int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info)
+{
+	int rc = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+
+	if (!info)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(info->dev_hdl);
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	info->dev_id = CAM_REQ_MGR_DEVICE_SENSOR;
+	strlcpy(info->name, CAM_SENSOR_NAME, sizeof(info->name));
+	if (s_ctrl->pipeline_delay >= 1 && s_ctrl->pipeline_delay <= 3)
+		info->p_delay = s_ctrl->pipeline_delay;
+	else
+		info->p_delay = 2;
+	info->trigger = CAM_TRIGGER_POINT_SOF;
+
+	return rc;
+}
+
+int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link)
+{
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+
+	if (!link)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(link->dev_hdl);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&s_ctrl->cam_sensor_mutex);
+	if (link->link_enable) {
+		s_ctrl->bridge_intf.link_hdl = link->link_hdl;
+		s_ctrl->bridge_intf.crm_cb = link->crm_cb;
+	} else {
+		s_ctrl->bridge_intf.link_hdl = -1;
+		s_ctrl->bridge_intf.crm_cb = NULL;
+	}
+	mutex_unlock(&s_ctrl->cam_sensor_mutex);
+
+	return 0;
+}
+
+int cam_sensor_power(struct v4l2_subdev *sd, int on)
+{
+	struct cam_sensor_ctrl_t *s_ctrl = v4l2_get_subdevdata(sd);
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	if (!on && s_ctrl->sensor_state == CAM_SENSOR_START) {
+		cam_sensor_power_down(s_ctrl);
+		s_ctrl->sensor_state = CAM_SENSOR_ACQUIRE;
+	}
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+
+	return 0;
+}
+
+int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc;
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_camera_slave_info *slave_info;
+	struct cam_hw_soc_info *soc_info;
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "failed: %pK", s_ctrl);
+		return -EINVAL;
+	}
+
+	if (!s_ctrl->sensordata) {
+		CAM_ERR(CAM_SENSOR, "failed: sensordata is NULL");
+		return -EINVAL;
+	}
+
+	soc_info = &s_ctrl->soc_info;
+	power_info = &s_ctrl->sensordata->power_info;
+	slave_info = &(s_ctrl->sensordata->slave_info);
+
+	if (s_ctrl->bob_pwm_switch) {
+		rc = cam_sensor_bob_pwm_mode_switch(soc_info,
+			s_ctrl->bob_reg_index, true);
+		if (rc) {
+			CAM_WARN(CAM_SENSOR,
+			"BoB PWM setup failed rc: %d", rc);
+			rc = 0;
+		}
+	}
+
+	rc = cam_sensor_core_power_up(power_info, soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "core power up failed: %d", rc);
+		return rc;
+	}
+
+	rc = camera_io_init(&(s_ctrl->io_master_info));
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "cci_init failed: rc: %d", rc);
+
+	return rc;
+}
+
+int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	struct cam_sensor_power_ctrl_t *power_info;
+	struct cam_hw_soc_info *soc_info;
+	int rc = 0;
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "failed: s_ctrl %pK", s_ctrl);
+		return -EINVAL;
+	}
+
+	if (!s_ctrl->sensordata) {
+		CAM_ERR(CAM_SENSOR, "failed: sensordata is NULL");
+		return -EINVAL;
+	}
+
+	power_info = &s_ctrl->sensordata->power_info;
+	soc_info = &s_ctrl->soc_info;
+
+	rc = cam_sensor_util_power_down(power_info, soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "core power down failed: %d", rc);
+		return rc;
+	}
+
+	if (s_ctrl->bob_pwm_switch) {
+		rc = cam_sensor_bob_pwm_mode_switch(soc_info,
+			s_ctrl->bob_reg_index, false);
+		if (rc) {
+			CAM_WARN(CAM_SENSOR,
+				"BoB PWM setup failed rc: %d", rc);
+			rc = 0;
+		}
+	}
+
+	camera_io_release(&(s_ctrl->io_master_info));
+
+	return rc;
+}
+
+int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl,
+	int64_t req_id, enum cam_sensor_packet_opcodes opcode)
+{
+	int rc = 0, offset, i;
+	uint64_t top = 0, del_req_id = 0;
+	struct i2c_settings_array *i2c_set = NULL;
+	struct i2c_settings_list *i2c_list;
+
+	if (req_id == 0) {
+		switch (opcode) {
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON: {
+			i2c_set = &s_ctrl->i2c_data.streamon_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG: {
+			i2c_set = &s_ctrl->i2c_data.init_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG: {
+			i2c_set = &s_ctrl->i2c_data.config_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF: {
+			i2c_set = &s_ctrl->i2c_data.streamoff_settings;
+			break;
+		}
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE:
+		case CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE:
+		default:
+			return 0;
+		}
+		if (i2c_set->is_settings_valid == 1) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = cam_sensor_i2c_modes_util(
+					&(s_ctrl->io_master_info),
+					i2c_list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+						"Failed to apply settings: %d",
+						rc);
+					return rc;
+				}
+			}
+		}
+	} else {
+		offset = req_id % MAX_PER_FRAME_ARRAY;
+		i2c_set = &(s_ctrl->i2c_data.per_frame[offset]);
+		if (i2c_set->is_settings_valid == 1 &&
+			i2c_set->request_id == req_id) {
+			list_for_each_entry(i2c_list,
+				&(i2c_set->list_head), list) {
+				rc = cam_sensor_i2c_modes_util(
+					&(s_ctrl->io_master_info),
+					i2c_list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+						"Failed to apply settings: %d",
+						rc);
+					return rc;
+				}
+			}
+		} else {
+			CAM_DBG(CAM_SENSOR,
+				"Invalid/NOP request to apply: %lld", req_id);
+		}
+
+		/*
+		 * Track the newest applicable request id in 'top' and the
+		 * previous high-water mark in 'del_req_id' so stale
+		 * per-frame slots can be reclaimed below.
+		 */
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			if ((req_id >=
+				s_ctrl->i2c_data.per_frame[i].request_id) &&
+				(top <
+				s_ctrl->i2c_data.per_frame[i].request_id) &&
+				(s_ctrl->i2c_data.per_frame[i].is_settings_valid
+					== 1)) {
+				del_req_id = top;
+				top = s_ctrl->i2c_data.per_frame[i].request_id;
+			}
+		}
+
+		if (top < req_id) {
+			if ((((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) >= BATCH_SIZE_MAX) ||
+				(((top % MAX_PER_FRAME_ARRAY) - (req_id %
+				MAX_PER_FRAME_ARRAY)) <= -BATCH_SIZE_MAX))
+				del_req_id = req_id;
+		}
+
+		if (!del_req_id)
+			return rc;
+
+		CAM_DBG(CAM_SENSOR, "top: %llu, del_req_id:%llu",
+			top, del_req_id);
+
+		for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+			if ((del_req_id >
+				 s_ctrl->i2c_data.per_frame[i].request_id) && (
+				 s_ctrl->i2c_data.per_frame[i].is_settings_valid
+					== 1)) {
+				s_ctrl->i2c_data.per_frame[i].request_id = 0;
+				rc = delete_request(
+					&(s_ctrl->i2c_data.per_frame[i]));
+				if (rc < 0)
+					CAM_ERR(CAM_SENSOR,
+						"Delete request Fail:%lld rc:%d",
+						del_req_id, rc);
+			}
+		}
+	}
+
+	return rc;
+}
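+
+/*
+ * Per-frame slot math above, by example (request ids are hypothetical):
+ * settings live in a ring of MAX_PER_FRAME_ARRAY slots indexed by
+ * req_id % MAX_PER_FRAME_ARRAY, so with an array size of 32:
+ *
+ *	req_id 3  -> per_frame[3]
+ *	req_id 35 -> per_frame[3]	(slot reused once request 3 is stale)
+ *
+ * The cleanup pass keeps the newest applicable request id in 'top',
+ * remembers the previous high-water mark in 'del_req_id', and deletes any
+ * valid slot older than del_req_id before the ring wraps onto it.
+ */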
+
+int32_t cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply)
+{
+	int32_t rc = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+
+	if (!apply)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(apply->dev_hdl);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+	CAM_DBG(CAM_REQ, "Sensor update req id: %lld", apply->request_id);
+	trace_cam_apply_req("Sensor", apply->request_id);
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	rc = cam_sensor_apply_settings(s_ctrl, apply->request_id,
+		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE);
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+	return rc;
+}
+
+int32_t cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush_req)
+{
+	int32_t rc = 0, i;
+	uint32_t cancel_req_id_found = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct i2c_settings_array *i2c_set = NULL;
+
+	if (!flush_req)
+		return -EINVAL;
+
+	s_ctrl = (struct cam_sensor_ctrl_t *)
+		cam_get_device_priv(flush_req->dev_hdl);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "Device data is NULL");
+		return -EINVAL;
+	}
+
+	if (s_ctrl->i2c_data.per_frame == NULL) {
+		CAM_ERR(CAM_SENSOR, "i2c frame data is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_ALL) {
+		s_ctrl->last_flush_req = flush_req->req_id;
+		CAM_DBG(CAM_SENSOR, "last request to flush is %lld",
+			flush_req->req_id);
+	}
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++) {
+		i2c_set = &(s_ctrl->i2c_data.per_frame[i]);
+
+		if ((flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ)
+				&& (i2c_set->request_id != flush_req->req_id))
+			continue;
+
+		if (i2c_set->is_settings_valid == 1) {
+			rc = delete_request(i2c_set);
+			if (rc < 0)
+				CAM_ERR(CAM_SENSOR,
+					"delete request: %lld rc: %d",
+					i2c_set->request_id, rc);
+
+			if (flush_req->type ==
+				CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ) {
+				cancel_req_id_found = 1;
+				break;
+			}
+		}
+	}
+
+	if (flush_req->type == CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ &&
+		!cancel_req_id_found)
+		CAM_DBG(CAM_SENSOR,
+			"Flush request id:%lld not found in the pending list",
+			flush_req->req_id);
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+	return rc;
+}
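+
+/*
+ * Flush semantics, by example (request id hypothetical):
+ * CAM_REQ_MGR_FLUSH_TYPE_ALL records the flushed id in last_flush_req and
+ * deletes every valid per-frame slot; CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ
+ * with req_id == 7 deletes only the slot whose request_id is 7 and stops
+ * at the first match.
+ */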
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
new file mode 100644
index 0000000..2889bf9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_core.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_CORE_H_
+#define _CAM_SENSOR_CORE_H_
+
+#include "cam_sensor_dev.h"
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API powers up the camera sensor module
+ */
+int cam_sensor_power_up(struct cam_sensor_ctrl_t *s_ctrl);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API powers down the camera sensor module
+ */
+int cam_sensor_power_down(struct cam_sensor_ctrl_t *s_ctrl);
+
+/**
+ * @sd: V4L2 subdevice
+ * @on: Turn off/on flag
+ *
+ * This API powers down the sensor when the subdev power is switched off
+ */
+int cam_sensor_power(struct v4l2_subdev *sd, int on);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ * @req_id: Request id
+ * @opcode: opcode for settings
+ *
+ * This API applies the req_id settings to sensor
+ */
+int cam_sensor_apply_settings(struct cam_sensor_ctrl_t *s_ctrl, int64_t req_id,
+	enum cam_sensor_packet_opcodes opcode);
+
+/**
+ * @apply: Req mgr structure for applying request
+ *
+ * This API applies the request that is mentioned
+ */
+int cam_sensor_apply_request(struct cam_req_mgr_apply_request *apply);
+
+/**
+ * @flush: Req mgr structure for flushing request
+ *
+ * This API flushes the request that is mentioned
+ */
+int cam_sensor_flush_request(struct cam_req_mgr_flush_request *flush);
+
+/**
+ * @info: Sub device info to req mgr
+ *
+ * Publish the subdevice info
+ */
+int cam_sensor_publish_dev_info(struct cam_req_mgr_device_info *info);
+
+/**
+ * @link: Link setup info
+ *
+ * This API establishes link with sensor subdevice with req mgr
+ */
+int cam_sensor_establish_link(struct cam_req_mgr_core_dev_link_setup *link);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ * @arg:    Camera control command argument
+ *
+ * This API handles the camera control argument reached to sensor
+ */
+int32_t cam_sensor_driver_cmd(struct cam_sensor_ctrl_t *s_ctrl, void *arg);
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * This API handles the camera sensor close/shutdown
+ */
+void cam_sensor_shutdown(struct cam_sensor_ctrl_t *s_ctrl);
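+
+/*
+ * Typical call order, as wired up through the CRM bridge interface
+ * (a minimal sketch; locking and error handling omitted):
+ *
+ *	cam_sensor_power_up(s_ctrl);
+ *	cam_sensor_apply_settings(s_ctrl, 0,
+ *		CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG);
+ *	cam_sensor_apply_settings(s_ctrl, req_id,
+ *		CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE);
+ *	cam_sensor_power_down(s_ctrl);
+ */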
+
+#endif /* _CAM_SENSOR_CORE_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
new file mode 100644
index 0000000..a291b5a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_sensor_dev.h"
+#include "cam_req_mgr_dev.h"
+#include "cam_sensor_soc.h"
+#include "cam_sensor_core.h"
+
+static long cam_sensor_subdev_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, void *arg)
+{
+	int rc = 0;
+	struct cam_sensor_ctrl_t *s_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_sensor_driver_cmd(s_ctrl, arg);
+		break;
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid ioctl cmd: %d", cmd);
+		rc = -EINVAL;
+		break;
+	}
+	return rc;
+}
+
+static int cam_sensor_subdev_close(struct v4l2_subdev *sd,
+	struct v4l2_subdev_fh *fh)
+{
+	struct cam_sensor_ctrl_t *s_ctrl =
+		v4l2_get_subdevdata(sd);
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "s_ctrl ptr is NULL");
+		return -EINVAL;
+	}
+
+	mutex_lock(&(s_ctrl->cam_sensor_mutex));
+	cam_sensor_shutdown(s_ctrl);
+	mutex_unlock(&(s_ctrl->cam_sensor_mutex));
+
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static long cam_sensor_init_subdev_do_ioctl(struct v4l2_subdev *sd,
+	unsigned int cmd, unsigned long arg)
+{
+	struct cam_control cmd_data;
+	int32_t rc = 0;
+
+	if (copy_from_user(&cmd_data, (void __user *)arg,
+		sizeof(cmd_data))) {
+		CAM_ERR(CAM_SENSOR, "Failed to copy from user_ptr=%pK size=%zu",
+			(void __user *)arg, sizeof(cmd_data));
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case VIDIOC_CAM_CONTROL:
+		rc = cam_sensor_subdev_ioctl(sd, cmd, &cmd_data);
+		if (rc < 0)
+			CAM_ERR(CAM_SENSOR, "cam_sensor_subdev_ioctl failed");
+		break;
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid compat ioctl cmd_type: %d", cmd);
+		rc = -EINVAL;
+	}
+
+	if (!rc) {
+		if (copy_to_user((void __user *)arg, &cmd_data,
+			sizeof(cmd_data))) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to copy to user_ptr=%pK size=%zu",
+				(void __user *)arg, sizeof(cmd_data));
+			rc = -EFAULT;
+		}
+	}
+
+	return rc;
+}
+#endif
+
+static struct v4l2_subdev_core_ops cam_sensor_subdev_core_ops = {
+	.ioctl = cam_sensor_subdev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = cam_sensor_init_subdev_do_ioctl,
+#endif
+	.s_power = cam_sensor_power,
+};
+
+static struct v4l2_subdev_ops cam_sensor_subdev_ops = {
+	.core = &cam_sensor_subdev_core_ops,
+};
+
+static const struct v4l2_subdev_internal_ops cam_sensor_internal_ops = {
+	.close = cam_sensor_subdev_close,
+};
+
+static int cam_sensor_init_subdev_params(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int rc = 0;
+
+	s_ctrl->v4l2_dev_str.internal_ops =
+		&cam_sensor_internal_ops;
+	s_ctrl->v4l2_dev_str.ops =
+		&cam_sensor_subdev_ops;
+	strlcpy(s_ctrl->device_name, CAMX_SENSOR_DEV_NAME,
+		sizeof(s_ctrl->device_name));
+	s_ctrl->v4l2_dev_str.name =
+		s_ctrl->device_name;
+	s_ctrl->v4l2_dev_str.sd_flags =
+		(V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS);
+	s_ctrl->v4l2_dev_str.ent_function =
+		CAM_SENSOR_DEVICE_TYPE;
+	s_ctrl->v4l2_dev_str.token = s_ctrl;
+
+	rc = cam_register_subdev(&(s_ctrl->v4l2_dev_str));
+	if (rc)
+		CAM_ERR(CAM_SENSOR, "cam_register_subdev failed, rc: %d", rc);
+
+	return rc;
+}
+
+static int32_t cam_sensor_driver_i2c_probe(struct i2c_client *client,
+	const struct i2c_device_id *id)
+{
+	int32_t rc = 0;
+	int i = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct cam_hw_soc_info   *soc_info = NULL;
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+		CAM_ERR(CAM_SENSOR,
+			"%s :i2c_check_functionality failed", client->name);
+		return -EFAULT;
+	}
+
+	/* Create sensor control structure */
+	s_ctrl = kzalloc(sizeof(*s_ctrl), GFP_KERNEL);
+	if (!s_ctrl)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, s_ctrl);
+
+	s_ctrl->io_master_info.client = client;
+	soc_info = &s_ctrl->soc_info;
+	soc_info->dev = &client->dev;
+	soc_info->dev_name = client->name;
+
+	/* Initialize sensor device type */
+	s_ctrl->of_node = client->dev.of_node;
+	s_ctrl->io_master_info.master_type = I2C_MASTER;
+	s_ctrl->is_probe_succeed = 0;
+	s_ctrl->last_flush_req = 0;
+
+	rc = cam_sensor_parse_dt(s_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "cam_sensor_parse_dt rc %d", rc);
+		goto free_s_ctrl;
+	}
+
+	rc = cam_sensor_init_subdev_params(s_ctrl);
+	if (rc)
+		goto free_s_ctrl;
+
+	s_ctrl->i2c_data.per_frame =
+		kzalloc(sizeof(struct i2c_settings_array) *
+		MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+	if (s_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto unreg_subdev;
+	}
+
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(s_ctrl->i2c_data.per_frame[i].list_head));
+
+	s_ctrl->bridge_intf.device_hdl = -1;
+	s_ctrl->bridge_intf.link_hdl = -1;
+	s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
+	s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
+	s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+	s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request;
+
+	s_ctrl->sensordata->power_info.dev = soc_info->dev;
+	v4l2_set_subdevdata(&(s_ctrl->v4l2_dev_str.sd), s_ctrl);
+	return rc;
+unreg_subdev:
+	cam_unregister_subdev(&(s_ctrl->v4l2_dev_str));
+free_s_ctrl:
+	kfree(s_ctrl);
+	return rc;
+}
+
+static int cam_sensor_platform_remove(struct platform_device *pdev)
+{
+	int                        i;
+	struct cam_sensor_ctrl_t  *s_ctrl;
+	struct cam_hw_soc_info    *soc_info;
+
+	s_ctrl = platform_get_drvdata(pdev);
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "sensor device is NULL");
+		return 0;
+	}
+
+	soc_info = &s_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	kfree(s_ctrl->i2c_data.per_frame);
+	devm_kfree(&pdev->dev, s_ctrl);
+
+	return 0;
+}
+
+static int cam_sensor_driver_i2c_remove(struct i2c_client *client)
+{
+	int                        i;
+	struct cam_sensor_ctrl_t  *s_ctrl = i2c_get_clientdata(client);
+	struct cam_hw_soc_info    *soc_info;
+
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "sensor device is NULL");
+		return 0;
+	}
+
+	soc_info = &s_ctrl->soc_info;
+	for (i = 0; i < soc_info->num_clk; i++)
+		devm_clk_put(soc_info->dev, soc_info->clk[i]);
+
+	kfree(s_ctrl->i2c_data.per_frame);
+	kfree(s_ctrl);
+
+	return 0;
+}
+
+static const struct of_device_id cam_sensor_driver_dt_match[] = {
+	{.compatible = "qcom,cam-sensor"},
+	{}
+};
+
+static int32_t cam_sensor_driver_platform_probe(
+	struct platform_device *pdev)
+{
+	int32_t rc = 0, i = 0;
+	struct cam_sensor_ctrl_t *s_ctrl = NULL;
+	struct cam_hw_soc_info *soc_info = NULL;
+
+	/* Create sensor control structure */
+	s_ctrl = devm_kzalloc(&pdev->dev,
+		sizeof(struct cam_sensor_ctrl_t), GFP_KERNEL);
+	if (!s_ctrl)
+		return -ENOMEM;
+
+	soc_info = &s_ctrl->soc_info;
+	soc_info->pdev = pdev;
+	soc_info->dev = &pdev->dev;
+	soc_info->dev_name = pdev->name;
+
+	/* Initialize sensor device type */
+	s_ctrl->of_node = pdev->dev.of_node;
+	s_ctrl->is_probe_succeed = 0;
+	s_ctrl->last_flush_req = 0;
+
+	/*fill in platform device*/
+	s_ctrl->pdev = pdev;
+
+	s_ctrl->io_master_info.master_type = CCI_MASTER;
+
+	rc = cam_sensor_parse_dt(s_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed: cam_sensor_parse_dt rc %d", rc);
+		goto free_s_ctrl;
+	}
+
+	/* Fill platform device id*/
+	pdev->id = soc_info->index;
+
+	rc = cam_sensor_init_subdev_params(s_ctrl);
+	if (rc)
+		goto free_s_ctrl;
+
+	s_ctrl->i2c_data.per_frame =
+		kzalloc(sizeof(struct i2c_settings_array) *
+		MAX_PER_FRAME_ARRAY, GFP_KERNEL);
+	if (s_ctrl->i2c_data.per_frame == NULL) {
+		rc = -ENOMEM;
+		goto unreg_subdev;
+	}
+
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.init_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.config_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamon_settings.list_head));
+	INIT_LIST_HEAD(&(s_ctrl->i2c_data.streamoff_settings.list_head));
+
+	for (i = 0; i < MAX_PER_FRAME_ARRAY; i++)
+		INIT_LIST_HEAD(&(s_ctrl->i2c_data.per_frame[i].list_head));
+
+	s_ctrl->bridge_intf.device_hdl = -1;
+	s_ctrl->bridge_intf.link_hdl = -1;
+	s_ctrl->bridge_intf.ops.get_dev_info = cam_sensor_publish_dev_info;
+	s_ctrl->bridge_intf.ops.link_setup = cam_sensor_establish_link;
+	s_ctrl->bridge_intf.ops.apply_req = cam_sensor_apply_request;
+	s_ctrl->bridge_intf.ops.flush_req = cam_sensor_flush_request;
+
+	s_ctrl->sensordata->power_info.dev = &pdev->dev;
+	platform_set_drvdata(pdev, s_ctrl);
+	v4l2_set_subdevdata(&(s_ctrl->v4l2_dev_str.sd), s_ctrl);
+
+	s_ctrl->sensor_state = CAM_SENSOR_INIT;
+
+	return rc;
+unreg_subdev:
+	cam_unregister_subdev(&(s_ctrl->v4l2_dev_str));
+free_s_ctrl:
+	devm_kfree(&pdev->dev, s_ctrl);
+	return rc;
+}
+
+MODULE_DEVICE_TABLE(of, cam_sensor_driver_dt_match);
+
+static struct platform_driver cam_sensor_platform_driver = {
+	.probe = cam_sensor_driver_platform_probe,
+	.driver = {
+		.name = "qcom,camera",
+		.owner = THIS_MODULE,
+		.of_match_table = cam_sensor_driver_dt_match,
+		.suppress_bind_attrs = true,
+	},
+	.remove = cam_sensor_platform_remove,
+};
+
+static const struct i2c_device_id i2c_id[] = {
+	{SENSOR_DRIVER_I2C, (kernel_ulong_t)NULL},
+	{ }
+};
+
+static struct i2c_driver cam_sensor_driver_i2c = {
+	.id_table = i2c_id,
+	.probe = cam_sensor_driver_i2c_probe,
+	.remove = cam_sensor_driver_i2c_remove,
+	.driver = {
+		.name = SENSOR_DRIVER_I2C,
+	},
+};
+
+static int __init cam_sensor_driver_init(void)
+{
+	int32_t rc = 0;
+
+	rc = platform_driver_register(&cam_sensor_platform_driver);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "platform_driver_register Failed: rc = %d",
+			rc);
+		return rc;
+	}
+
+	rc = i2c_add_driver(&cam_sensor_driver_i2c);
+	if (rc) {
+		CAM_ERR(CAM_SENSOR, "i2c_add_driver failed rc = %d", rc);
+		platform_driver_unregister(&cam_sensor_platform_driver);
+	}
+
+	return rc;
+}
+
+static void __exit cam_sensor_driver_exit(void)
+{
+	i2c_del_driver(&cam_sensor_driver_i2c);
+	platform_driver_unregister(&cam_sensor_platform_driver);
+}
+
+module_init(cam_sensor_driver_init);
+module_exit(cam_sensor_driver_exit);
+MODULE_DESCRIPTION("cam_sensor_driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
new file mode 100644
index 0000000..db84586
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_dev.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_DEV_H_
+#define _CAM_SENSOR_DEV_H_
+
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/ion.h>
+#include <linux/iommu.h>
+#include <linux/timer.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <cam_cci_dev.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_subdev.h>
+#include <cam_sensor_io.h>
+#include "cam_debug_util.h"
+
+#define NUM_MASTERS 2
+#define NUM_QUEUES 2
+
+#define TRUE  1
+#define FALSE 0
+
+#undef CDBG
+#ifdef CAM_SENSOR_DEBUG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define SENSOR_DRIVER_I2C "i2c_camera"
+#define CAMX_SENSOR_DEV_NAME "cam-sensor-driver"
+
+enum cam_sensor_state_t {
+	CAM_SENSOR_INIT,
+	CAM_SENSOR_ACQUIRE,
+	CAM_SENSOR_CONFIG,
+	CAM_SENSOR_START,
+};
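+
+/*
+ * Rough lifecycle, as driven by cam_sensor_driver_cmd and cam_sensor_power:
+ * INIT is set at probe, CONFIG once config settings are applied, and START
+ * falls back to ACQUIRE when the subdev is powered off while streaming.
+ */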
+
+/**
+ * struct intf_params
+ * @device_hdl: Device Handle
+ * @session_hdl: Session Handle
+ * @link_hdl: Link Handle
+ * @ops: KMD operations
+ * @crm_cb: Callback API pointers
+ */
+struct intf_params {
+	int32_t device_hdl;
+	int32_t session_hdl;
+	int32_t link_hdl;
+	struct cam_req_mgr_kmd_ops ops;
+	struct cam_req_mgr_crm_cb *crm_cb;
+};
+
+/**
+ * struct cam_sensor_ctrl_t: Camera control structure
+ * @pdev: Platform device
+ * @soc_info: Camera hardware SoC information
+ * @cam_sensor_mutex: Sensor mutex
+ * @sensordata: Sensor board Information
+ * @cci_i2c_master: CCI I2C master index
+ * @cci_num: CCI device number
+ * @io_master_info: Information about the communication master
+ * @sensor_state: Sensor states
+ * @is_probe_succeed: Probe succeeded or not
+ * @id: Cell Index
+ * @of_node: Of node ptr
+ * @v4l2_dev_str: V4L2 device structure
+ * @sensor_probe_addr_type: Sensor probe address type
+ * @sensor_probe_data_type: Sensor probe data type
+ * @i2c_data: Sensor I2C register settings
+ * @sensor_info: Sensor query cap structure
+ * @bridge_intf: Bridge interface structure
+ * @device_name: Sensor device name
+ * @streamon_count: Count to hold the number of times stream on called
+ * @streamoff_count: Count to hold the number of times stream off called
+ * @bob_reg_index: Index of the BoB regulator
+ * @bob_pwm_switch: Boolean flag to switch into PWM mode for BoB regulator
+ * @last_flush_req: Last request to flush
+ * @pipeline_delay: Sensor pipeline delay
+ */
+struct cam_sensor_ctrl_t {
+	struct platform_device *pdev;
+	struct cam_hw_soc_info soc_info;
+	struct mutex cam_sensor_mutex;
+	struct cam_sensor_board_info *sensordata;
+	enum cci_i2c_master_t cci_i2c_master;
+	enum cci_device_num cci_num;
+	struct camera_io_master io_master_info;
+	enum cam_sensor_state_t sensor_state;
+	uint8_t is_probe_succeed;
+	uint32_t id;
+	struct device_node *of_node;
+	struct cam_subdev v4l2_dev_str;
+	uint8_t sensor_probe_addr_type;
+	uint8_t sensor_probe_data_type;
+	struct i2c_data_settings i2c_data;
+	struct  cam_sensor_query_cap sensor_info;
+	struct intf_params bridge_intf;
+	char device_name[20];
+	uint32_t streamon_count;
+	uint32_t streamoff_count;
+	int bob_reg_index;
+	bool bob_pwm_switch;
+	uint32_t last_flush_req;
+	uint16_t pipeline_delay;
+};
+
+#endif /* _CAM_SENSOR_DEV_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
new file mode 100644
index 0000000..0633d33
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_sensor_util.h>
+#include <cam_sensor_io.h>
+#include <cam_req_mgr_util.h>
+#include "cam_sensor_soc.h"
+#include "cam_soc_util.h"
+
+int32_t cam_sensor_get_sub_module_index(struct device_node *of_node,
+	struct cam_sensor_board_info *s_info)
+{
+	int rc = 0, i = 0;
+	uint32_t val = 0;
+	struct device_node *src_node = NULL;
+	struct cam_sensor_board_info *sensor_info;
+
+	sensor_info = s_info;
+
+	for (i = 0; i < SUB_MODULE_MAX; i++)
+		sensor_info->subdev_id[i] = -1;
+
+	src_node = of_parse_phandle(of_node, "actuator-src", 0);
+	if (!src_node) {
+		CAM_DBG(CAM_SENSOR, "actuator src_node NULL");
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CAM_DBG(CAM_SENSOR, "actuator cell index %d, rc %d", val, rc);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "failed %d", rc);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_ACTUATOR] = val;
+		of_node_put(src_node);
+	}
+
+	src_node = of_parse_phandle(of_node, "ois-src", 0);
+	if (!src_node) {
+		CAM_DBG(CAM_SENSOR, "ois src_node NULL");
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CAM_DBG(CAM_SENSOR, "ois cell index %d, rc %d", val, rc);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "failed %d",  rc);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_OIS] = val;
+		of_node_put(src_node);
+	}
+
+	src_node = of_parse_phandle(of_node, "eeprom-src", 0);
+	if (!src_node) {
+		CAM_DBG(CAM_SENSOR, "eeprom src_node NULL");
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CAM_DBG(CAM_SENSOR, "eeprom cell index %d, rc %d", val, rc);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "failed %d", rc);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_EEPROM] = val;
+		of_node_put(src_node);
+	}
+
+	src_node = of_parse_phandle(of_node, "led-flash-src", 0);
+	if (!src_node) {
+		CAM_DBG(CAM_SENSOR, "led flash src_node NULL");
+	} else {
+		rc = of_property_read_u32(src_node, "cell-index", &val);
+		CAM_DBG(CAM_SENSOR, "led flash cell index %d, rc %d", val, rc);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "failed %d", rc);
+			of_node_put(src_node);
+			return rc;
+		}
+		sensor_info->subdev_id[SUB_MODULE_LED_FLASH] = val;
+		of_node_put(src_node);
+	}
+
+	rc = of_property_read_u32(of_node, "csiphy-sd-index", &val);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "parsing the dt node for csiphy failed rc %d", rc);
+	else
+		sensor_info->subdev_id[SUB_MODULE_CSIPHY] = val;
+
+	return rc;
+}
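+
+/*
+ * Illustrative DT fragment consumed above (node and label names are
+ * hypothetical; only the property names match what is parsed here):
+ *
+ *	cam_sensor0: qcom,cam-sensor@0 {
+ *		cell-index = <0>;
+ *		csiphy-sd-index = <0>;
+ *		actuator-src = <&actuator0>;
+ *		ois-src = <&ois0>;
+ *		eeprom-src = <&eeprom0>;
+ *		led-flash-src = <&led_flash0>;
+ *	};
+ */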
+
+static int32_t cam_sensor_driver_get_dt_data(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int32_t rc = 0;
+	int i = 0;
+	struct cam_sensor_board_info *sensordata = NULL;
+	struct device_node *of_node = s_ctrl->of_node;
+	struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
+
+	s_ctrl->sensordata = kzalloc(sizeof(*sensordata), GFP_KERNEL);
+	if (!s_ctrl->sensordata)
+		return -ENOMEM;
+
+	sensordata = s_ctrl->sensordata;
+
+	rc = cam_soc_util_get_dt_properties(soc_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Failed to read DT properties rc %d", rc);
+		goto FREE_SENSOR_DATA;
+	}
+
+	rc =  cam_sensor_util_init_gpio_pin_tbl(soc_info,
+			&sensordata->power_info.gpio_num_info);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Failed to read gpios %d", rc);
+		goto FREE_SENSOR_DATA;
+	}
+
+	s_ctrl->id = soc_info->index;
+
+	/* Validate cell_id */
+	if (s_ctrl->id >= MAX_CAMERAS) {
+		CAM_ERR(CAM_SENSOR, "invalid cell_id %d", s_ctrl->id);
+		rc = -EINVAL;
+		goto FREE_SENSOR_DATA;
+	}
+
+	/* Store the index of BoB regulator if it is available */
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		if (!strcmp(soc_info->rgltr_name[i],
+			"cam_bob")) {
+			CAM_DBG(CAM_SENSOR,
+				"i: %d cam_bob", i);
+			s_ctrl->bob_reg_index = i;
+			soc_info->rgltr[i] = devm_regulator_get(soc_info->dev,
+				soc_info->rgltr_name[i]);
+			if (IS_ERR_OR_NULL(soc_info->rgltr[i])) {
+				CAM_WARN(CAM_SENSOR,
+					"Regulator: %s get failed",
+					soc_info->rgltr_name[i]);
+				soc_info->rgltr[i] = NULL;
+			} else {
+				if (!of_property_read_bool(of_node,
+					"pwm-switch")) {
+					CAM_DBG(CAM_SENSOR,
+					"No BoB PWM switch param defined");
+					s_ctrl->bob_pwm_switch = false;
+				} else {
+					s_ctrl->bob_pwm_switch = true;
+				}
+			}
+		}
+	}
+
+	/* Read subdev info */
+	rc = cam_sensor_get_sub_module_index(of_node, sensordata);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed to get sub module index, rc=%d",
+			 rc);
+		goto FREE_SENSOR_DATA;
+	}
+
+	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+		/* Get CCI master */
+		rc = of_property_read_u32(of_node, "cci-master",
+			&s_ctrl->cci_i2c_master);
+		CAM_DBG(CAM_SENSOR, "cci-master %d, rc %d",
+			s_ctrl->cci_i2c_master, rc);
+		if (rc < 0) {
+			/* Set default master 0 */
+			s_ctrl->cci_i2c_master = MASTER_0;
+			rc = 0;
+		}
+
+		rc = of_property_read_u32(of_node, "cci-device",
+			&s_ctrl->cci_num);
+		CAM_DBG(CAM_SENSOR, "cci-device %d, rc %d",
+			s_ctrl->cci_num, rc);
+		if (rc < 0) {
+			/* Set default CCI device 0 */
+			s_ctrl->cci_num = CCI_DEVICE_0;
+			rc = 0;
+		}
+	}
+
+	if (of_property_read_u32(of_node, "sensor-position-pitch",
+		&sensordata->pos_pitch) < 0) {
+		CAM_DBG(CAM_SENSOR, "Invalid sensor position");
+		sensordata->pos_pitch = 360;
+	}
+	if (of_property_read_u32(of_node, "sensor-position-roll",
+		&sensordata->pos_roll) < 0) {
+		CAM_DBG(CAM_SENSOR, "Invalid sensor position");
+		sensordata->pos_roll = 360;
+	}
+	if (of_property_read_u32(of_node, "sensor-position-yaw",
+		&sensordata->pos_yaw) < 0) {
+		CAM_DBG(CAM_SENSOR, "Invalid sensor position");
+		sensordata->pos_yaw = 360;
+	}
+
+	return rc;
+
+FREE_SENSOR_DATA:
+	kfree(sensordata);
+	s_ctrl->sensordata = NULL;
+	return rc;
+}
+
+int32_t msm_sensor_init_default_params(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	/* Validate input parameters */
+	if (!s_ctrl) {
+		CAM_ERR(CAM_SENSOR, "failed: invalid params s_ctrl %pK",
+			s_ctrl);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_SENSOR,
+		"master_type: %d", s_ctrl->io_master_info.master_type);
+	/* Initialize cci_client */
+	if (s_ctrl->io_master_info.master_type == CCI_MASTER) {
+		s_ctrl->io_master_info.cci_client = kzalloc(sizeof(
+			struct cam_sensor_cci_client), GFP_KERNEL);
+		if (!(s_ctrl->io_master_info.cci_client))
+			return -ENOMEM;
+
+		s_ctrl->io_master_info.cci_client->cci_device
+			= s_ctrl->cci_num;
+	} else if (s_ctrl->io_master_info.master_type == I2C_MASTER) {
+		if (!(s_ctrl->io_master_info.client))
+			return -EINVAL;
+	} else {
+		CAM_ERR(CAM_SENSOR,
+			"Invalid / unsupported master type");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int32_t cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl)
+{
+	int32_t i, rc = 0;
+	struct cam_hw_soc_info *soc_info = &s_ctrl->soc_info;
+
+	/* Parse dt information and store in sensor control structure */
+	rc = cam_sensor_driver_get_dt_data(s_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Failed to get dt data rc %d", rc);
+		return rc;
+	}
+
+	/* Initialize mutex */
+	mutex_init(&(s_ctrl->cam_sensor_mutex));
+
+	/* Initialize default parameters */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = devm_clk_get(soc_info->dev,
+					soc_info->clk_name[i]);
+		if (IS_ERR(soc_info->clk[i])) {
+			CAM_ERR(CAM_SENSOR, "get failed for %s",
+				 soc_info->clk_name[i]);
+			rc = PTR_ERR(soc_info->clk[i]);
+			return rc;
+		}
+	}
+	rc = msm_sensor_init_default_params(s_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR,
+			"failed: msm_sensor_init_default_params rc %d", rc);
+		goto FREE_DT_DATA;
+	}
+
+	return rc;
+
+FREE_DT_DATA:
+	kfree(s_ctrl->sensordata);
+	s_ctrl->sensordata = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.h
new file mode 100644
index 0000000..726aac2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor/cam_sensor_soc.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_SOC_H_
+#define _CAM_SENSOR_SOC_H_
+
+#include "cam_sensor_dev.h"
+
+/**
+ * @s_ctrl: Sensor ctrl structure
+ *
+ * Parses sensor dt
+ */
+int cam_sensor_parse_dt(struct cam_sensor_ctrl_t *s_ctrl);
+
+#endif /* _CAM_SENSOR_SOC_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
new file mode 100644
index 0000000..32064d1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sensor_io.o cam_sensor_cci_i2c.o cam_sensor_qup_i2c.o cam_sensor_spi.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
new file mode 100644
index 0000000..cda3b84
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_cci_i2c.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_cci_dev.h"
+
+int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *cci_client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = -EINVAL;
+	unsigned char buf[CAMERA_SENSOR_I2C_TYPE_DWORD];
+	struct cam_cci_ctrl cci_ctrl;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		return rc;
+
+	cci_ctrl.cmd = MSM_CCI_I2C_READ;
+	cci_ctrl.cci_info = cci_client;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = data_type;
+	rc = v4l2_subdev_call(cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "rc = %d", rc);
+		return rc;
+	}
+
+	rc = cci_ctrl.status;
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = buf[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = buf[0] << 8 | buf[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = buf[0] << 16 | buf[1] << 8 | buf[2];
+	else
+		*data = buf[0] << 24 | buf[1] << 16 |
+			buf[2] << 8 | buf[3];
+
+	return rc;
+}
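+
+/*
+ * Usage sketch (register address is hypothetical): reading a 16-bit
+ * chip-id register over CCI, where the result is assembled big-endian:
+ *
+ *	uint32_t chip_id = 0;
+ *
+ *	rc = cam_cci_i2c_read(cci_client, 0x0016, &chip_id,
+ *		CAMERA_SENSOR_I2C_TYPE_WORD, CAMERA_SENSOR_I2C_TYPE_WORD);
+ *
+ * On success chip_id holds (buf[0] << 8) | buf[1].
+ */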
+
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *cci_client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t num_byte)
+{
+	int32_t                    rc = -EFAULT;
+	unsigned char             *buf = NULL;
+	int                        i = 0;
+	struct cam_cci_ctrl        cci_ctrl;
+
+	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (num_byte > I2C_REG_DATA_MAX)) {
+		CAM_ERR(CAM_SENSOR, "addr_type %d num_byte %d", addr_type,
+			num_byte);
+		return rc;
+	}
+
+	buf = kzalloc(num_byte, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	cci_ctrl.cmd = MSM_CCI_I2C_READ;
+	cci_ctrl.cci_info = cci_client;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr = addr;
+	cci_ctrl.cfg.cci_i2c_read_cfg.addr_type = addr_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data_type = data_type;
+	cci_ctrl.cfg.cci_i2c_read_cfg.data = buf;
+	cci_ctrl.cfg.cci_i2c_read_cfg.num_byte = num_byte;
+	cci_ctrl.status = -EFAULT;
+	rc = v4l2_subdev_call(cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	rc = cci_ctrl.status;
+	CAM_DBG(CAM_SENSOR, "addr = 0x%x, rc = %d", addr, rc);
+	for (i = 0; i < num_byte; i++) {
+		data[i] = buf[i];
+		CAM_DBG(CAM_SENSOR, "Byte %d: Data: 0x%x\n", i, data[i]);
+	}
+	kfree(buf);
+	return rc;
+}
+
+static int32_t cam_cci_i2c_write_table_cmd(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	enum cam_cci_cmd_type cmd)
+{
+	int32_t rc = -EINVAL;
+	struct cam_cci_ctrl cci_ctrl;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if (write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		return rc;
+
+	cci_ctrl.cmd = cmd;
+	cci_ctrl.cci_info = client->cci_client;
+	cci_ctrl.cfg.cci_i2c_write_cfg.reg_setting =
+		write_setting->reg_setting;
+	cci_ctrl.cfg.cci_i2c_write_cfg.data_type = write_setting->data_type;
+	cci_ctrl.cfg.cci_i2c_write_cfg.addr_type = write_setting->addr_type;
+	cci_ctrl.cfg.cci_i2c_write_cfg.size = write_setting->size;
+	rc = v4l2_subdev_call(client->cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
+		return rc;
+	}
+	rc = cci_ctrl.status;
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000, (write_setting->delay
+			* 1000) + 1000);
+
+	return rc;
+}
+
+int32_t cam_cci_i2c_write_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	return cam_cci_i2c_write_table_cmd(client, write_setting,
+		MSM_CCI_I2C_WRITE);
+}
+
+int32_t cam_cci_i2c_write_continuous_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	uint8_t cam_sensor_i2c_write_flag)
+{
+	int32_t rc = 0;
+
+	if (cam_sensor_i2c_write_flag == 1)
+		rc = cam_cci_i2c_write_table_cmd(client, write_setting,
+			MSM_CCI_I2C_WRITE_BURST);
+	else if (cam_sensor_i2c_write_flag == 0)
+		rc = cam_cci_i2c_write_table_cmd(client, write_setting,
+			MSM_CCI_I2C_WRITE_SEQ);
+
+	return rc;
+}
+
+static int32_t cam_cci_i2c_compare(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type)
+{
+	int32_t rc;
+	uint32_t reg_data = 0;
+
+	rc = cam_cci_i2c_read(client, addr, &reg_data,
+		addr_type, data_type);
+	if (rc < 0)
+		return rc;
+
+	reg_data = reg_data & 0xFFFF;
+	if (data == (reg_data & ~data_mask))
+		return I2C_COMPARE_MATCH;
+	return I2C_COMPARE_MISMATCH;
+}
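+
+/*
+ * Mask semantics, by worked example (values hypothetical): bits set in
+ * data_mask are cleared from the readback before comparing. With
+ * reg_data = 0x1234 and data_mask = 0x00FF the comparison reduces to
+ *
+ *	data == (0x1234 & ~0x00FF)  ->  data == 0x1200
+ *
+ * so callers must pass the expected value with the masked bits cleared.
+ */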
+
+int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t delay_ms)
+{
+	int32_t rc = -EINVAL;
+	int32_t i = 0;
+
+	CAM_DBG(CAM_SENSOR, "addr: 0x%x data: 0x%x dt: %d",
+		addr, data, data_type);
+
+	if (delay_ms > MAX_POLL_DELAY_MS) {
+		CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+			delay_ms, MAX_POLL_DELAY_MS);
+		return -EINVAL;
+	}
+	for (i = 0; i < delay_ms; i++) {
+		rc = cam_cci_i2c_compare(client,
+			addr, data, data_mask, data_type, addr_type);
+		if (!rc)
+			return rc;
+
+		usleep_range(1000, 1010);
+	}
+
+	/* rc == 1 means the read succeeded but the value never matched */
+	if (rc == 1)
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d (non-fatal)", rc);
+
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
+
+	return rc;
+}
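+
+/*
+ * Usage sketch (address and mask hypothetical): wait up to 5 ms for bit 0
+ * of a status register to clear, sampling about once per millisecond:
+ *
+ *	rc = cam_cci_i2c_poll(cci_client, 0x0005, 0x00, 0xFE,
+ *		CAMERA_SENSOR_I2C_TYPE_BYTE, CAMERA_SENSOR_I2C_TYPE_BYTE, 5);
+ *
+ * rc is I2C_COMPARE_MATCH (0) once (reg & ~0xFE) reads back as 0x00.
+ */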
+
+int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client,
+	uint16_t cci_cmd)
+{
+	int32_t rc = 0;
+	struct cam_cci_ctrl cci_ctrl;
+
+	cci_ctrl.cmd = cci_cmd;
+	cci_ctrl.cci_info = cci_client;
+	rc = v4l2_subdev_call(cci_client->cci_subdev,
+		core, ioctl, VIDIOC_MSM_CCI_CFG, &cci_ctrl);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "Failed rc = %d", rc);
+		return rc;
+	}
+	return cci_ctrl.status;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
new file mode 100644
index 0000000..6063ac0
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_i2c.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_I2C_H_
+#define _CAM_SENSOR_I2C_H_
+
+#include <linux/delay.h>
+#include <media/v4l2-subdev.h>
+#include <media/cam_sensor.h>
+#include "cam_cci_dev.h"
+#include "cam_sensor_io.h"
+
+#define I2C_POLL_TIME_MS 5
+#define MAX_POLL_DELAY_MS 100
+
+#define I2C_COMPARE_MATCH 0
+#define I2C_COMPARE_MISMATCH 1
+
+#define I2C_REG_DATA_MAX       (8*1024)
+
+/**
+ * @client: CCI client structure
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @data_type: I2C data type
+ *
+ * This API handles CCI read
+ */
+int32_t cam_cci_i2c_read(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+/**
+ * @client: CCI client structure
+ * @addr: I2c address
+ * @data: I2C data
+ * @addr_type: I2c address type
+ * @data_type: I2c data type
+ * @num_byte: number of bytes
+ *
+ * This API handles CCI sequential read
+ */
+int32_t cam_camera_cci_i2c_read_seq(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t num_byte);
+
+/**
+ * @client: CCI client structure
+ * @write_setting: I2C register setting
+ *
+ * This API handles CCI random write
+ */
+int32_t cam_cci_i2c_write_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * @client: CCI client structure
+ * @write_setting: I2C register setting
+ * @cam_sensor_i2c_write_flag: burst or seq write
+ *
+ * This API handles CCI continuous write
+ */
+int32_t cam_cci_i2c_write_continuous_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	uint8_t cam_sensor_i2c_write_flag);
+
+/**
+ * @cci_client: CCI client structure
+ * @cci_cmd: CCI command type
+ *
+ * Does I2C call to I2C functionalities
+ */
+int32_t cam_sensor_cci_i2c_util(struct cam_sensor_cci_client *cci_client,
+	uint16_t cci_cmd);
+
+/**
+ * @client: CCI client structure
+ * @addr: I2C address
+ * @data: I2C data
+ * @data_mask: I2C data mask
+ * @data_type: I2C data type
+ * @addr_type: I2C addr type
+ * @delay_ms: Delay in milli seconds
+ *
+ * This API implements CCI based I2C poll
+ */
+int32_t cam_cci_i2c_poll(struct cam_sensor_cci_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t delay_ms);
+
+/**
+ * cam_qup_i2c_read : QUP based i2c read
+ * @client    : QUP I2C client structure
+ * @data      : I2C data
+ * @addr_type : I2c address type
+ * @data_type : I2C data type
+ *
+ * This API handles QUP I2C read
+ */
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+/**
+ * cam_qup_i2c_read_seq : QUP based I2C sequential read
+ * @client    : QUP I2C client structure
+ * @data      : I2C data
+ * @addr_type : I2c address type
+ * @num_byte  : number of bytes to read
+ *
+ * This API handles QUP I2C sequential read
+ */
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte);
+
+/**
+ * cam_qup_i2c_poll : QUP based I2C poll operation
+ * @client    : QUP I2C client structure
+ * @addr      : I2C address
+ * @data      : I2C data
+ * @data_mask : I2C data mask
+ * @data_type : I2C data type
+ * @addr_type : I2C addr type
+ * @delay_ms  : Delay in milli seconds
+ *
+ * This API implements QUP based I2C poll
+ */
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms);
+
+/**
+ * cam_qup_i2c_write_table : QUP based I2C write random
+ * @client        : QUP I2C client structure
+ * @write_setting : I2C register settings
+ *
+ * This API handles QUP I2C random write
+ */
+int32_t cam_qup_i2c_write_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * cam_qup_i2c_write_continuous_table : QUP based I2C continuous write (Burst/Seq)
+ * @client: QUP I2C client structure
+ * @write_setting: I2C register setting
+ * @cam_sensor_i2c_write_flag: burst or seq write
+ *
+ * This API handles QUP continuous write
+ */
+int32_t cam_qup_i2c_write_continuous_table(
+	struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	uint8_t cam_sensor_i2c_write_flag);
+
+#endif /*_CAM_SENSOR_I2C_H*/
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
new file mode 100644
index 0000000..410e6cc
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_sensor_io.h"
+#include "cam_sensor_i2c.h"
+
+int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
+	uint32_t addr, uint16_t data, uint32_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms)
+{
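+	/* The CCI poll path compares only the low byte of data_mask */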
+	int16_t mask = data_mask & 0xFF;
+
+	if (!io_master_info) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_cci_i2c_poll(io_master_info->cci_client,
+			addr, data, mask, data_type, addr_type, delay_ms);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_poll(io_master_info->client,
+			addr, data, data_mask, addr_type, data_type,
+			delay_ms);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	if (!io_master_info) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_cci_i2c_read(io_master_info->cci_client,
+			addr, data, addr_type, data_type);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_read(io_master_info->client,
+			addr, data, addr_type, data_type);
+	} else if (io_master_info->master_type == SPI_MASTER) {
+		return cam_spi_read(io_master_info,
+			addr, data, addr_type, data_type);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type, int32_t num_bytes)
+{
+	if (!io_master_info) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_camera_cci_i2c_read_seq(io_master_info->cci_client,
+			addr, data, addr_type, data_type, num_bytes);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_read_seq(io_master_info->client,
+			addr, data, addr_type, num_bytes);
+	} else if (io_master_info->master_type == SPI_MASTER) {
+		return cam_spi_read_seq(io_master_info,
+			addr, data, addr_type, num_bytes);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	if (!write_setting || !io_master_info) {
+		CAM_ERR(CAM_SENSOR,
+			"Input parameters not valid ws: %pK ioinfo: %pK",
+			write_setting, io_master_info);
+		return -EINVAL;
+	}
+
+	if (!write_setting->reg_setting) {
+		CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_cci_i2c_write_table(io_master_info,
+			write_setting);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_write_table(io_master_info,
+			write_setting);
+	} else if (io_master_info->master_type == SPI_MASTER) {
+		return cam_spi_write_table(io_master_info,
+			write_setting);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_dev_write_continuous(struct camera_io_master *io_master_info,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	uint8_t cam_sensor_i2c_write_flag)
+{
+	if (!write_setting || !io_master_info) {
+		CAM_ERR(CAM_SENSOR,
+			"Input parameters not valid ws: %pK ioinfo: %pK",
+			write_setting, io_master_info);
+		return -EINVAL;
+	}
+
+	if (!write_setting->reg_setting) {
+		CAM_ERR(CAM_SENSOR, "Invalid Register Settings");
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_cci_i2c_write_continuous_table(io_master_info,
+			write_setting, cam_sensor_i2c_write_flag);
+	} else if (io_master_info->master_type == I2C_MASTER) {
+		return cam_qup_i2c_write_continuous_table(io_master_info,
+			write_setting, cam_sensor_i2c_write_flag);
+	} else if (io_master_info->master_type == SPI_MASTER) {
+		return cam_spi_write_table(io_master_info,
+			write_setting);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Comm. Master:%d",
+			io_master_info->master_type);
+		return -EINVAL;
+	}
+}
+
+int32_t camera_io_init(struct camera_io_master *io_master_info)
+{
+	if (!io_master_info) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		io_master_info->cci_client->cci_subdev =
+		cam_cci_get_subdev(io_master_info->cci_client->cci_device);
+		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
+			MSM_CCI_INIT);
+	} else if ((io_master_info->master_type == I2C_MASTER) ||
+			(io_master_info->master_type == SPI_MASTER)) {
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int32_t camera_io_release(struct camera_io_master *io_master_info)
+{
+	if (!io_master_info) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args");
+		return -EINVAL;
+	}
+
+	if (io_master_info->master_type == CCI_MASTER) {
+		return cam_sensor_cci_i2c_util(io_master_info->cci_client,
+			MSM_CCI_RELEASE);
+	} else if ((io_master_info->master_type == I2C_MASTER) ||
+			(io_master_info->master_type == SPI_MASTER)) {
+		return 0;
+	}
+
+	return -EINVAL;
+}
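+
+/*
+ * Usage sketch of the dispatch helpers above (a minimal sequence; the
+ * io_master_info fields and write_setting are assumed to be prepared by
+ * the owning driver, error handling omitted):
+ *
+ *	camera_io_init(io_master_info);
+ *	camera_io_dev_read(io_master_info, addr, &data,
+ *		CAMERA_SENSOR_I2C_TYPE_WORD, CAMERA_SENSOR_I2C_TYPE_WORD);
+ *	camera_io_dev_write(io_master_info, &write_setting);
+ *	camera_io_release(io_master_info);
+ */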
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
new file mode 100644
index 0000000..eeb03bb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_io.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_IO_H_
+#define _CAM_SENSOR_IO_H_
+
+#include <media/cam_sensor.h>
+
+#include "cam_sensor_cmn_header.h"
+
+#define CCI_MASTER 1
+#define I2C_MASTER 2
+#define SPI_MASTER 3
+
+/**
+ * @master_type: CCI master type
+ * @client: I2C client information structure
+ * @cci_client: CCI client information structure
+ * @spi_client: SPI client information structure
+ */
+struct camera_io_master {
+	int master_type;
+	struct i2c_client *client;
+	struct cam_sensor_cci_client *cci_client;
+	struct cam_sensor_spi_client *spi_client;
+};
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @addr_type: I2C addr_type
+ * @data_type: I2C data type
+ *
+ * This API abstracts read functionality based on master type
+ */
+int32_t camera_io_dev_read(struct camera_io_master *io_master_info,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @addr_type: I2C addr type
+ * @data_type: I2C data type
+ * @num_bytes: number of bytes
+ *
+ * This API abstracts read functionality based on master type
+ */
+int32_t camera_io_dev_read_seq(struct camera_io_master *io_master_info,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	int32_t num_bytes);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ *
+ * This API initializes the I2C/SPI master based on master type
+ */
+int32_t camera_io_init(struct camera_io_master *io_master_info);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ *
+ * This API releases the I2C/SPI master based on master type
+ */
+int32_t camera_io_release(struct camera_io_master *io_master_info);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @write_setting: write settings information
+ *
+ * This API abstracts write functionality based on master type
+ */
+int32_t camera_io_dev_write(struct camera_io_master *io_master_info,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @write_setting: write settings information
+ * @cam_sensor_i2c_write_flag: differentiate between burst & seq
+ *
+ * This API abstracts write functionality based on master type and
+ * write flag for continuous write
+ */
+int32_t camera_io_dev_write_continuous(struct camera_io_master *io_master_info,
+	struct cam_sensor_i2c_reg_setting *write_setting,
+	uint8_t cam_sensor_i2c_write_flag);
+
+/**
+ * @io_master_info: I2C/SPI master information
+ * @addr: I2C address
+ * @data: I2C data
+ * @data_mask: I2C data mask
+ * @data_type: I2C data type
+ * @addr_type: I2C address type
+ * @delay_ms: delay in milli seconds
+ *
+ * This API abstracts poll functionality based on master type
+ */
+int32_t camera_io_dev_poll(struct camera_io_master *io_master_info,
+	uint32_t addr, uint16_t data, uint32_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms);
+
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_spi.h"
+#endif /* _CAM_SENSOR_IO_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
new file mode 100644
index 0000000..6fec7f8
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_qup_i2c.c
@@ -0,0 +1,530 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_sensor_cmn_header.h"
+#include "cam_sensor_i2c.h"
+#include "cam_sensor_io.h"
+
+#define I2C_REG_MAX_BUF_SIZE   8
+
+static int32_t cam_qup_i2c_rxdata(
+	struct i2c_client *dev_client, unsigned char *rxdata,
+	enum camera_sensor_i2c_type addr_type,
+	int data_length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->addr >> 1;
+	struct i2c_msg msgs[] = {
+		{
+			.addr  = saddr,
+			.flags = 0,
+			.len   = addr_type,
+			.buf   = rxdata,
+		},
+		{
+			.addr  = saddr,
+			.flags = I2C_M_RD,
+			.len   = data_length,
+			.buf   = rxdata,
+		},
+	};
+	rc = i2c_transfer(dev_client->adapter, msgs, 2);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
+	return rc;
+}
+
+
+static int32_t cam_qup_i2c_txdata(
+	struct camera_io_master *dev_client, unsigned char *txdata,
+	int length)
+{
+	int32_t rc = 0;
+	uint16_t saddr = dev_client->client->addr >> 1;
+	struct i2c_msg msg[] = {
+		{
+			.addr = saddr,
+			.flags = 0,
+			.len = length,
+			.buf = txdata,
+		 },
+	};
+	rc = i2c_transfer(dev_client->client->adapter, msg, 1);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed 0x%x", saddr);
+	return rc;
+}
+
+int32_t cam_qup_i2c_read(struct i2c_client *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = -EINVAL;
+	unsigned char *buf = NULL;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verfication");
+		return rc;
+	}
+
+	buf = kzalloc(addr_type + data_type, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> 8;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, data_type);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+		goto read_fail;
+	}
+
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = buf[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = buf[0] << 8 | buf[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = buf[0] << 16 | buf[1] << 8 | buf[2];
+	else
+		*data = buf[0] << 24 | buf[1] << 16 |
+			buf[2] << 8 | buf[3];
+
+	CAM_DBG(CAM_SENSOR, "addr = 0x%x data: 0x%x", addr, *data);
+read_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
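The address is packed most-significant byte first into buf, and the same buffer is reused for the read phase of the combined transfer. A worked example, with a hypothetical register address:

	uint32_t val = 0;
	int32_t rc;

	/* 2-byte address 0x3012, 2-byte big-endian data */
	rc = cam_qup_i2c_read(client, 0x3012, &val,
		CAMERA_SENSOR_I2C_TYPE_WORD, CAMERA_SENSOR_I2C_TYPE_WORD);
	/* wire: S <sid|W> 0x30 0x12 Sr <sid|R> d0 d1 P; val = d0 << 8 | d1 */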
+
+int32_t cam_qup_i2c_read_seq(struct i2c_client *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte)
+{
+	int32_t rc = -EFAULT;
+	unsigned char *buf = NULL;
+	int i;
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
+		return rc;
+	}
+
+	if ((num_byte == 0) || (num_byte > I2C_REG_DATA_MAX)) {
+		CAM_ERR(CAM_SENSOR, "num_byte:0x%x max supported:0x%x",
+			num_byte, I2C_REG_DATA_MAX);
+		return rc;
+	}
+
+	buf = kzalloc(addr_type + num_byte, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = addr >> BITS_PER_BYTE;
+		buf[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = addr >> 16;
+		buf[1] = addr >> 8;
+		buf[2] = addr;
+	} else {
+		buf[0] = addr >> 24;
+		buf[1] = addr >> 16;
+		buf[2] = addr >> 8;
+		buf[3] = addr;
+	}
+
+	rc = cam_qup_i2c_rxdata(client, buf, addr_type, num_byte);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+		goto read_seq_fail;
+	}
+
+	for (i = 0; i < num_byte; i++)
+		data[i] = buf[i];
+
+read_seq_fail:
+	kfree(buf);
+	buf = NULL;
+	return rc;
+}
+
+static int32_t cam_qup_i2c_compare(struct i2c_client *client,
+	uint32_t addr, uint32_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type data_type,
+	enum camera_sensor_i2c_type addr_type)
+{
+	int32_t rc;
+	uint32_t reg_data = 0;
+
+	rc = cam_qup_i2c_read(client, addr, &reg_data,
+		addr_type, data_type);
+	if (rc < 0)
+		return rc;
+
+	reg_data = reg_data & 0xFFFF;
+	if (data != (reg_data & ~data_mask))
+		return I2C_COMPARE_MISMATCH;
+
+	return I2C_COMPARE_MATCH;
+}
+
+int32_t cam_qup_i2c_poll(struct i2c_client *client,
+	uint32_t addr, uint16_t data, uint16_t data_mask,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type,
+	uint32_t delay_ms)
+{
+	int32_t rc = 0;
+	int i = 0;
+
+	if ((delay_ms > MAX_POLL_DELAY_MS) || (delay_ms == 0)) {
+		CAM_ERR(CAM_SENSOR, "invalid delay = %d max_delay = %d",
+			delay_ms, MAX_POLL_DELAY_MS);
+		return -EINVAL;
+	}
+
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return -EINVAL;
+
+	for (i = 0; i < delay_ms; i++) {
+		rc = cam_qup_i2c_compare(client,
+			addr, data, data_mask, data_type, addr_type);
+		if (rc == I2C_COMPARE_MATCH)
+			return rc;
+
+		usleep_range(1000, 1010);
+	}
+	/* If rc is MISMATCH then read is successful but poll is failure */
+	if (rc == I2C_COMPARE_MISMATCH)
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d(non-fatal)", rc);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "poll failed rc=%d", rc);
+
+	return rc;
+}
+
+static int32_t cam_qup_i2c_write(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_array *reg_setting,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int32_t rc = 0;
+	unsigned char *buf = NULL;
+	uint8_t len = 0;
+
+	buf = kzalloc(I2C_REG_MAX_BUF_SIZE, GFP_KERNEL | GFP_DMA);
+	if (!buf) {
+		CAM_ERR(CAM_SENSOR, "Buffer memory allocation failed");
+		return -ENOMEM;
+	}
+
+	CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+			reg_setting->reg_addr, data_type);
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		len = 1;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = reg_setting->reg_addr >> 8;
+		buf[1] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
+		len = 2;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = reg_setting->reg_addr >> 16;
+		buf[1] = reg_setting->reg_addr >> 8;
+		buf[2] = reg_setting->reg_addr;
+		len = 3;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[0] = reg_setting->reg_addr >> 24;
+		buf[1] = reg_setting->reg_addr >> 16;
+		buf[2] = reg_setting->reg_addr >> 8;
+		buf[3] = reg_setting->reg_addr;
+		len = 4;
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
+		rc = -EINVAL;
+		goto deallocate_buffer;
+	}
+
+	CAM_DBG(CAM_SENSOR, "Data: 0x%x", reg_setting->reg_data);
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[len] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		len += 1;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[len] = reg_setting->reg_data >> 8;
+		buf[len+1] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		len += 2;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[len] = reg_setting->reg_data >> 16;
+		buf[len + 1] = reg_setting->reg_data >> 8;
+		buf[len + 2] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
+		len += 3;
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[len] = reg_setting->reg_data >> 24;
+		buf[len + 1] = reg_setting->reg_data >> 16;
+		buf[len + 2] = reg_setting->reg_data >> 8;
+		buf[len + 3] = reg_setting->reg_data;
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+1, buf[len+1]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+2, buf[len+2]);
+		CAM_DBG(CAM_SENSOR, "Byte %d: 0x%x", len+3, buf[len+3]);
+		len += 4;
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid Data Type");
+		rc = -EINVAL;
+		goto deallocate_buffer;
+	}
+
+	rc = cam_qup_i2c_txdata(client, buf, len);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+
+deallocate_buffer:
+	kfree(buf);
+	return rc;
+}
+
+int32_t cam_qup_i2c_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = -EINVAL;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+		return rc;
+
+	reg_setting = write_setting->reg_setting;
+
+	for (i = 0; i < write_setting->size; i++) {
+		CAM_DBG(CAM_SENSOR, "addr 0x%x data 0x%x",
+			reg_setting->reg_addr, reg_setting->reg_data);
+
+		rc = cam_qup_i2c_write(client, reg_setting,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000,
+			(write_setting->delay * 1000) + 1000);
+
+	return rc;
+}
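A caller builds the table as an array of reg_addr/reg_data pairs plus one shared address and data width. A sketch, assuming an initialized io_master; the register addresses are hypothetical:

	struct cam_sensor_i2c_reg_array regs[] = {
		{ .reg_addr = 0x0100, .reg_data = 0x0000 }, /* e.g. stream off */
		{ .reg_addr = 0x0202, .reg_data = 0x03e8 }, /* e.g. exposure  */
	};
	struct cam_sensor_i2c_reg_setting setting = {
		.reg_setting = regs,
		.size        = ARRAY_SIZE(regs),
		.addr_type   = CAMERA_SENSOR_I2C_TYPE_WORD,
		.data_type   = CAMERA_SENSOR_I2C_TYPE_WORD,
		.delay       = 1, /* ms settle time after the table */
	};

	rc = cam_qup_i2c_write_table(io_master, &setting);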
+
+static int32_t cam_qup_i2c_write_seq(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = 0;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	reg_setting = write_setting->reg_setting;
+
+	for (i = 0; i < write_setting->size; i++) {
+		reg_setting->reg_addr += i;
+		rc = cam_qup_i2c_write(client, reg_setting,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Sequential i2c write failed: rc: %d", rc);
+			break;
+		}
+		reg_setting++;
+	}
+
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000,
+			(write_setting->delay * 1000) + 1000);
+
+	return rc;
+}
+
+static int32_t cam_qup_i2c_write_burst(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int32_t rc = 0;
+	uint32_t len = 0;
+	unsigned char *buf = NULL;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	enum camera_sensor_i2c_type addr_type;
+	enum camera_sensor_i2c_type data_type;
+
+	buf = kzalloc((write_setting->addr_type +
+			(write_setting->size * write_setting->data_type)),
+			GFP_KERNEL);
+
+	if (!buf) {
+		CAM_ERR(CAM_SENSOR, "BUF is NULL");
+		return -ENOMEM;
+	}
+
+	reg_setting = write_setting->reg_setting;
+	addr_type = write_setting->addr_type;
+	data_type = write_setting->data_type;
+
+	CAM_DBG(CAM_SENSOR, "reg addr = 0x%x data type: %d",
+			reg_setting->reg_addr, data_type);
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		len = 1;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = reg_setting->reg_addr >> 8;
+		buf[1] = reg_setting->reg_addr;
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len, buf[len]);
+		CAM_DBG(CAM_SENSOR, "byte %d: 0x%x", len+1, buf[len+1]);
+		len = 2;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = reg_setting->reg_addr >> 16;
+		buf[1] = reg_setting->reg_addr >> 8;
+		buf[2] = reg_setting->reg_addr;
+		len = 3;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+		buf[0] = reg_setting->reg_addr >> 24;
+		buf[1] = reg_setting->reg_addr >> 16;
+		buf[2] = reg_setting->reg_addr >> 8;
+		buf[3] = reg_setting->reg_addr;
+		len = 4;
+	} else {
+		CAM_ERR(CAM_SENSOR, "Invalid I2C addr type");
+		rc = -EINVAL;
+		goto free_res;
+	}
+
+	for (i = 0; i < write_setting->size; i++) {
+		if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+			buf[len] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			len += 1;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+			buf[len] = reg_setting->reg_data >> 8;
+			buf[len+1] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			len += 2;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+			buf[len] = reg_setting->reg_data >> 16;
+			buf[len + 1] = reg_setting->reg_data >> 8;
+			buf[len + 2] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+2, buf[len+2]);
+			len += 3;
+		} else if (data_type == CAMERA_SENSOR_I2C_TYPE_DWORD) {
+			buf[len] = reg_setting->reg_data >> 24;
+			buf[len + 1] = reg_setting->reg_data >> 16;
+			buf[len + 2] = reg_setting->reg_data >> 8;
+			buf[len + 3] = reg_setting->reg_data;
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len, buf[len]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+1, buf[len+1]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+2, buf[len+2]);
+			CAM_DBG(CAM_SENSOR,
+				"Byte %d: 0x%x", len+3, buf[len+3]);
+			len += 4;
+		} else {
+			CAM_ERR(CAM_SENSOR, "Invalid Data Type");
+			rc = -EINVAL;
+			goto free_res;
+		}
+		reg_setting++;
+	}
+
+	if (len > (write_setting->addr_type +
+		(write_setting->size * write_setting->data_type))) {
+		CAM_ERR(CAM_SENSOR, "Invalid Length: %u | Expected length: %u",
+			len, (write_setting->addr_type +
+			(write_setting->size * write_setting->data_type)));
+		rc = -EINVAL;
+		goto free_res;
+	}
+
+	rc = cam_qup_i2c_txdata(client, buf, len);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "failed rc: %d", rc);
+
+free_res:
+	kfree(buf);
+	return rc;
+}
+
+int32_t cam_qup_i2c_write_continuous_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_settings,
+	uint8_t cam_sensor_i2c_write_flag)
+{
+	int32_t rc = 0;
+
+	if (!client || !write_settings)
+		return -EINVAL;
+
+	if ((write_settings->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_settings->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| (write_settings->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| write_settings->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX)))
+		return -EINVAL;
+
+	if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_BURST)
+		rc = cam_qup_i2c_write_burst(client, write_settings);
+	else if (cam_sensor_i2c_write_flag == CAM_SENSOR_I2C_WRITE_SEQ)
+		rc = cam_qup_i2c_write_seq(client, write_settings);
+
+	return rc;
+}
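The flag selects between the two continuous layouts: burst streams every payload byte after a single address phase, while seq issues one full write cycle per element, with cam_qup_i2c_write_seq() bumping each element's address by its index so identical base addresses land on consecutive registers. Reusing the setting sketched above (illustrative, not a complete sequence):

	/* one address phase, then all data bytes back to back */
	rc = cam_qup_i2c_write_continuous_table(io_master, &setting,
		CAM_SENSOR_I2C_WRITE_BURST);

	/* one write cycle per element, at base, base + 1, ... */
	rc = cam_qup_i2c_write_continuous_table(io_master, &setting,
		CAM_SENSOR_I2C_WRITE_SEQ);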
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
new file mode 100644
index 0000000..1b4ea0f
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_sensor_spi.h"
+#include "cam_debug_util.h"
+
+static int cam_spi_txfr(struct spi_device *spi, char *txbuf,
+	char *rxbuf, int num_byte)
+{
+	struct spi_transfer txfr;
+	struct spi_message msg;
+
+	memset(&txfr, 0, sizeof(txfr));
+	txfr.tx_buf = txbuf;
+	txfr.rx_buf = rxbuf;
+	txfr.len = num_byte;
+	spi_message_init(&msg);
+	spi_message_add_tail(&txfr, &msg);
+
+	return spi_sync(spi, &msg);
+}
+
+static int cam_spi_txfr_read(struct spi_device *spi, char *txbuf,
+	char *rxbuf, int txlen, int rxlen)
+{
+	struct spi_transfer tx;
+	struct spi_transfer rx;
+	struct spi_message m;
+
+	memset(&tx, 0, sizeof(tx));
+	memset(&rx, 0, sizeof(rx));
+	tx.tx_buf = txbuf;
+	rx.rx_buf = rxbuf;
+	tx.len = txlen;
+	rx.len = rxlen;
+	spi_message_init(&m);
+	spi_message_add_tail(&tx, &m);
+	spi_message_add_tail(&rx, &m);
+	return spi_sync(spi, &m);
+}
+
+/**
+ * cam_set_addr() - helper function to set transfer address
+ * @addr:	device address
+ * @addr_len:	the addr field length of an instruction
+ * @addr_type:	type (i.e. byte-length) of @addr
+ * @str:	shifted address output, must be zeroed when passed in
+ *
+ * This helper function sets @str based on the addr field length of an
+ * instruction and the data length.
+ */
+static void cam_set_addr(uint32_t addr, uint8_t addr_len,
+	enum camera_sensor_i2c_type addr_type,
+	char *str)
+{
+	if (!addr_len)
+		return;
+
+	if (addr_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		str[0] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		str[0] = addr >> 8;
+		str[1] = addr;
+	} else if (addr_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		str[0] = addr >> 16;
+		str[1] = addr >> 8;
+		str[2] = addr;
+	} else {
+		str[0] = addr >> 24;
+		str[1] = addr >> 16;
+		str[2] = addr >> 8;
+		str[3] = addr;
+	}
+}
+
+/**
+ * cam_spi_tx_helper() - wrapper for SPI transaction
+ * @client:     io client
+ * @inst:       inst of this transaction
+ * @addr:       device addr following the inst
+ * @data:       output byte array (could be NULL)
+ * @addr_type:  byte-length of @addr on the wire
+ * @num_byte:   size of @data
+ * @tx:         optional transfer buffer, at least header + @num_byte long
+ * @rx:         optional transfer buffer, at least header + @num_byte long
+ *
+ * This is the core function for SPI transaction, except for writes.  It first
+ * checks address type, then allocates required memory for tx/rx buffers.
+ * It sends out <opcode><addr>, and optionally receives @num_byte of response,
+ * if @data is not NULL.  This function does not check for wait conditions,
+ * and will return immediately once bus transaction finishes.
+ *
+ * This function will allocate buffers of header + @num_byte long.  For
+ * large transfers, the allocation could fail.  External buffer @tx, @rx
+ * should be passed in to bypass allocation.  The size of buffer should be
+ * at least header + num_byte long.  Since buffer is managed externally,
+ * @data will be ignored, and read results will be in @rx.
+ * @tx, @rx also can be used for repeated transfers to improve performance.
+ */
+static int32_t cam_spi_tx_helper(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte, char *tx, char *rx)
+{
+	int32_t rc = -EINVAL;
+	struct spi_device *spi = client->spi_client->spi_master;
+	struct device *dev = NULL;
+	char *ctx = NULL, *crx = NULL;
+	uint32_t len, hlen;
+	uint8_t retries = client->spi_client->retries;
+	uint32_t txr = 0, rxr = 0;
+	struct page *page_tx = NULL, *page_rx = NULL;
+
+	if (!spi) {
+		CAM_ERR(CAM_SENSOR, "Invalid SPI master");
+		return -EINVAL;
+	}
+
+	hlen = cam_camera_spi_get_hlen(inst);
+	len = hlen + num_byte;
+	dev = &(spi->dev);
+
+	if (tx) {
+		ctx = tx;
+	} else {
+		txr = PAGE_ALIGN(len) >> PAGE_SHIFT;
+		page_tx = cma_alloc(dev_get_cma_area(dev),
+			txr, 0, GFP_KERNEL);
+		if (!page_tx)
+			return -ENOMEM;
+
+		ctx = page_address(page_tx);
+	}
+
+	if (num_byte) {
+		if (rx) {
+			crx = rx;
+		} else {
+			rxr = PAGE_ALIGN(len) >> PAGE_SHIFT;
+			page_rx = cma_alloc(dev_get_cma_area(dev),
+				rxr, 0, GFP_KERNEL);
+			if (!page_rx) {
+				if (!tx)
+					cma_release(dev_get_cma_area(dev),
+						page_tx, txr);
+
+				return -ENOMEM;
+			}
+			crx = page_address(page_rx);
+		}
+	} else {
+		crx = NULL;
+	}
+
+	ctx[0] = inst->opcode;
+	cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+	while ((rc = cam_spi_txfr(spi, ctx, crx, len)) && retries) {
+		retries--;
+		msleep(client->spi_client->retry_delay);
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_EEPROM, "failed: spi txfr rc %d", rc);
+		goto out;
+	}
+	if (data && num_byte && !rx)
+		memcpy(data, crx + hlen, num_byte);
+
+out:
+	if (!tx)
+		cma_release(dev_get_cma_area(dev), page_tx, txr);
+	if (!rx)
+		cma_release(dev_get_cma_area(dev), page_rx, rxr);
+	return rc;
+}
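Per the comment above, callers issuing many small transfers can sidestep the per-call CMA allocation by passing their own buffers; read data then lands at rx plus the header length. A hedged sketch (CHUNK, base, total and addr_type are illustrative; error handling elided):

	uint32_t hlen = cam_camera_spi_get_hlen(inst);
	char *tx = kzalloc(hlen + CHUNK, GFP_KERNEL | GFP_DMA);
	char *rx = kzalloc(hlen + CHUNK, GFP_KERNEL | GFP_DMA);
	uint32_t off;

	for (off = 0; off < total; off += CHUNK)
		rc = cam_spi_tx_helper(client, inst, base + off, NULL,
			addr_type, CHUNK, tx, rx); /* data at rx + hlen */

	kfree(tx);
	kfree(rx);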
+
+static int32_t cam_spi_tx_read(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst, uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint32_t num_byte, char *tx, char *rx)
+{
+	int32_t rc = -EINVAL;
+	struct spi_device *spi = client->spi_client->spi_master;
+	char *ctx = NULL, *crx = NULL;
+	uint32_t hlen;
+	uint8_t retries = client->spi_client->retries;
+
+	hlen = cam_camera_spi_get_hlen(inst);
+	if (tx) {
+		ctx = tx;
+	} else {
+		ctx = kzalloc(hlen, GFP_KERNEL | GFP_DMA);
+		if (!ctx)
+			return -ENOMEM;
+	}
+	if (num_byte) {
+		if (rx) {
+			crx = rx;
+		} else {
+			crx = kzalloc(num_byte, GFP_KERNEL | GFP_DMA);
+			if (!crx) {
+				if (!tx)
+					kfree(ctx);
+				return -ENOMEM;
+			}
+		}
+	} else {
+		crx = NULL;
+	}
+
+	ctx[0] = inst->opcode;
+	cam_set_addr(addr, inst->addr_len, addr_type, ctx + 1);
+
+	CAM_DBG(CAM_EEPROM, "tx(%u): %02x %02x %02x %02x", hlen, ctx[0],
+		ctx[1], ctx[2], ctx[3]);
+	while ((rc = cam_spi_txfr_read(spi, ctx, crx, hlen, num_byte))
+			&& retries) {
+		retries--;
+		msleep(client->spi_client->retry_delay);
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		goto out;
+	}
+	if (data && num_byte && !rx)
+		memcpy(data, crx, num_byte);
+out:
+	if (!tx)
+		kfree(ctx);
+	if (!rx)
+		kfree(crx);
+	return rc;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	int rc = -EINVAL;
+	uint8_t temp[CAMERA_SENSOR_I2C_TYPE_MAX];
+
+	if (addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX
+		|| data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID
+		|| data_type >= CAMERA_SENSOR_I2C_TYPE_MAX) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr/data_type verification");
+		return rc;
+	}
+
+	rc = cam_spi_tx_read(client,
+		&client->spi_client->cmd_tbl.read, addr, &temp[0],
+		addr_type, data_type, NULL, NULL);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		return rc;
+	}
+
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE)
+		*data = temp[0];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD)
+		*data = (temp[0] << BITS_PER_BYTE) | temp[1];
+	else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B)
+		*data = (temp[0] << 16 | temp[1] << 8 | temp[2]);
+	else
+		*data = (temp[0] << 24 | temp[1] << 16 | temp[2] << 8 |
+			temp[3]);
+
+	CAM_DBG(CAM_SENSOR, "addr 0x%x, data %u", addr, *data);
+	return rc;
+}
+
+int32_t cam_spi_read_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, int32_t num_bytes)
+{
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)) {
+		CAM_ERR(CAM_SENSOR, "Failed with addr_type verification");
+		return -EINVAL;
+	}
+
+	if (num_bytes == 0) {
+		CAM_ERR(CAM_SENSOR, "num_byte: 0x%x", num_bytes);
+		return -EINVAL;
+	}
+
+	return cam_spi_tx_helper(client,
+		&client->spi_client->cmd_tbl.read_seq, addr, data,
+		addr_type, num_bytes, NULL, NULL);
+}
+
+int cam_spi_query_id(struct camera_io_master *client,
+	uint32_t addr, enum camera_sensor_i2c_type addr_type,
+	uint8_t *data, uint32_t num_byte)
+{
+	return cam_spi_tx_helper(client,
+		&client->spi_client->cmd_tbl.query_id,
+		addr, data, addr_type, num_byte, NULL, NULL);
+}
+
+static int32_t cam_spi_read_status_reg(
+	struct camera_io_master *client, uint8_t *status,
+	enum camera_sensor_i2c_type addr_type)
+{
+	struct cam_camera_spi_inst *rs =
+		&client->spi_client->cmd_tbl.read_status;
+
+	if (rs->addr_len != 0) {
+		CAM_ERR(CAM_SENSOR, "not implemented yet");
+		return -ENXIO;
+	}
+	return cam_spi_tx_helper(client, rs, 0, status,
+		addr_type, 1, NULL, NULL);
+}
+
+static int32_t cam_spi_device_busy(struct camera_io_master *client,
+	uint8_t *busy, enum camera_sensor_i2c_type addr_type)
+{
+	int rc;
+	uint8_t st = 0;
+
+	rc = cam_spi_read_status_reg(client, &st, addr_type);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed to read status reg");
+		return rc;
+	}
+	*busy = st & client->spi_client->busy_mask;
+	return 0;
+}
+
+static int32_t cam_spi_wait(struct camera_io_master *client,
+	struct cam_camera_spi_inst *inst,
+	enum camera_sensor_i2c_type addr_type)
+{
+	uint8_t busy = 0;
+	int i, rc;
+
+	CAM_DBG(CAM_SENSOR, "op 0x%x wait start", inst->opcode);
+	for (i = 0; i < inst->delay_count; i++) {
+		rc = cam_spi_device_busy(client, &busy, addr_type);
+		if (rc < 0)
+			return rc;
+		if (!busy)
+			break;
+		msleep(inst->delay_intv);
+		CAM_DBG(CAM_SENSOR, "op 0x%x wait", inst->opcode);
+	}
+	if (busy) {
+		CAM_ERR(CAM_SENSOR, "op %x timed out", inst->opcode);
+		return -ETIMEDOUT;
+	}
+	CAM_DBG(CAM_SENSOR, "op %x finished", inst->opcode);
+	return 0;
+}
+
+static int32_t cam_spi_write_enable(struct camera_io_master *client,
+	enum camera_sensor_i2c_type addr_type)
+{
+	struct cam_camera_spi_inst *we =
+		&client->spi_client->cmd_tbl.write_enable;
+	int rc;
+
+	if (we->opcode == 0)
+		return 0;
+	if (we->addr_len != 0) {
+		CAM_ERR(CAM_SENSOR, "not implemented yet");
+		return -EINVAL;
+	}
+	rc = cam_spi_tx_helper(client, we, 0, NULL, addr_type,
+		0, NULL, NULL);
+	if (rc < 0)
+		CAM_ERR(CAM_SENSOR, "write enable failed");
+	return rc;
+}
+
+/**
+ * cam_spi_page_program() - core function to perform write
+ * @client: need for obtaining SPI device
+ * @addr: address to program on device
+ * @data: data to write
+ * @addr_type: byte-length of @addr on the wire
+ * @len: size of data
+ * @tx: tx buffer, size >= header + len
+ *
+ * This function performs SPI write, and has no boundary check.  Writing range
+ * should not cross page boundary, or data will be corrupted.  Transaction is
+ * guaranteed to be finished when it returns.  This function should never be
+ * used outside cam_spi_write_seq().
+ */
+static int32_t cam_spi_page_program(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	uint16_t len, uint8_t *tx)
+{
+	int rc;
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	struct spi_device *spi = client->spi_client->spi_master;
+	uint8_t retries = client->spi_client->retries;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+
+	CAM_DBG(CAM_SENSOR, "addr 0x%x, size 0x%x", addr, len);
+	rc = cam_spi_write_enable(client, addr_type);
+	if (rc < 0)
+		return rc;
+	memset(tx, 0, header_len);
+	tx[0] = pg->opcode;
+	cam_set_addr(addr, pg->addr_len, addr_type, tx + 1);
+	memcpy(tx + header_len, data, len);
+	CAM_DBG(CAM_SENSOR, "tx(%u): %02x %02x %02x %02x",
+		len, tx[0], tx[1], tx[2], tx[3]);
+	while ((rc = spi_write(spi, tx, len + header_len)) && retries) {
+		rc = cam_spi_wait(client, pg, addr_type);
+		msleep(client->spi_client->retry_delay);
+		retries--;
+	}
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR, "failed %d", rc);
+		return rc;
+	}
+	rc = cam_spi_wait(client, pg, addr_type);
+	return rc;
+}
+
+int cam_spi_write(struct camera_io_master *client,
+	uint32_t addr, uint32_t data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type)
+{
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+	uint16_t len = 0;
+	char buf[CAMERA_SENSOR_I2C_TYPE_MAX];
+	char *tx;
+	int rc = -EINVAL;
+
+	if ((addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return rc;
+
+	CAM_DBG(CAM_EEPROM, "Data: 0x%x", data);
+	len = header_len + (uint8_t)data_type;
+	tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+	if (!tx)
+		goto NOMEM;
+
+	if (data_type == CAMERA_SENSOR_I2C_TYPE_BYTE) {
+		buf[0] = data;
+		CAM_DBG(CAM_EEPROM, "Byte %d: 0x%x", len, buf[0]);
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_WORD) {
+		buf[0] = (data >> BITS_PER_BYTE) & 0x00FF;
+		buf[1] = (data & 0x00FF);
+	} else if (data_type == CAMERA_SENSOR_I2C_TYPE_3B) {
+		buf[0] = (data >> 16) & 0x00FF;
+		buf[1] = (data >> 8) & 0x00FF;
+		buf[2] = (data & 0x00FF);
+	} else {
+		buf[0] = (data >> 24) & 0x00FF;
+		buf[1] = (data >> 16) & 0x00FF;
+		buf[2] = (data >> 8) & 0x00FF;
+		buf[3] = (data & 0x00FF);
+	}
+
+	rc = cam_spi_page_program(client, addr, buf,
+		addr_type, data_type, tx);
+	if (rc < 0)
+		goto ERROR;
+	goto OUT;
+NOMEM:
+	CAM_ERR(CAM_SENSOR, "memory allocation failed");
+	return -ENOMEM;
+ERROR:
+	CAM_ERR(CAM_SENSOR, "error write");
+OUT:
+	kfree(tx);
+	return rc;
+}
+
+int32_t cam_spi_write_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, uint32_t num_byte)
+{
+	struct cam_camera_spi_inst *pg =
+		&client->spi_client->cmd_tbl.page_program;
+	const uint32_t page_size = client->spi_client->page_size;
+	uint8_t header_len = sizeof(pg->opcode) + pg->addr_len + pg->dummy_len;
+	uint16_t len;
+	uint32_t cur_len, end;
+	char *tx, *pdata = data;
+	int rc = -EINVAL;
+
+	if ((addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX) ||
+		(addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID))
+		return rc;
+	/* single page write */
+	if ((addr % page_size) + num_byte <= page_size) {
+		len = header_len + num_byte;
+		tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+		if (!tx)
+			goto NOMEM;
+		rc = cam_spi_page_program(client, addr, data, addr_type,
+			num_byte, tx);
+		if (rc < 0)
+			goto ERROR;
+		goto OUT;
+	}
+	/* multi page write */
+	len = header_len + page_size;
+	tx = kmalloc(len, GFP_KERNEL | GFP_DMA);
+	if (!tx)
+		goto NOMEM;
+	while (num_byte) {
+		end = min(page_size, (addr % page_size) + num_byte);
+		cur_len = end - (addr % page_size);
+		CAM_ERR(CAM_SENSOR, "Addr: 0x%x curr_len: 0x%x pgSize: %d",
+			addr, cur_len, page_size);
+		rc = cam_spi_page_program(client, addr, pdata, addr_type,
+			cur_len, tx);
+		if (rc < 0)
+			goto ERROR;
+		addr += cur_len;
+		pdata += cur_len;
+		num_byte -= cur_len;
+	}
+	goto OUT;
+NOMEM:
+	pr_err("%s: memory allocation failed\n", __func__);
+	return -ENOMEM;
+ERROR:
+	pr_err("%s: error write\n", __func__);
+OUT:
+	kfree(tx);
+	return rc;
+}
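The split arithmetic is easiest to see with numbers (hypothetical geometry):

	/* page_size = 256, addr = 0x1f0, num_byte = 40:
	 *   pass 1: end = min(256, 240 + 40) = 256, cur_len = 16
	 *           -> program 0x1f0..0x1ff
	 *   pass 2: addr = 0x200, num_byte = 24, cur_len = 24
	 *           -> program 0x200..0x217
	 * no single program ever crosses a page boundary, as
	 * cam_spi_page_program() requires.
	 */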
+
+int cam_spi_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting)
+{
+	int i;
+	int rc = -EFAULT;
+	struct cam_sensor_i2c_reg_array *reg_setting;
+
+	if (!client || !write_setting)
+		return rc;
+
+	if ((write_setting->addr_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->addr_type >= CAMERA_SENSOR_I2C_TYPE_MAX)
+		|| (write_setting->data_type <= CAMERA_SENSOR_I2C_TYPE_INVALID)
+		|| (write_setting->data_type >= CAMERA_SENSOR_I2C_TYPE_MAX))
+		return rc;
+
+	reg_setting = write_setting->reg_setting;
+	for (i = 0; i < write_setting->size; i++) {
+		CAM_DBG(CAM_SENSOR, "addr %x data %x",
+			reg_setting->reg_addr, reg_setting->reg_data);
+		rc = cam_spi_write(client,
+			reg_setting->reg_addr, reg_setting->reg_data,
+			write_setting->addr_type, write_setting->data_type);
+		if (rc < 0)
+			break;
+		reg_setting++;
+	}
+
+	if (write_setting->delay > 20)
+		msleep(write_setting->delay);
+	else if (write_setting->delay)
+		usleep_range(write_setting->delay * 1000,
+			(write_setting->delay * 1000) + 1000);
+
+	return rc;
+}
+
+int cam_spi_erase(struct camera_io_master *client, uint32_t addr,
+	enum camera_sensor_i2c_type addr_type, uint32_t size)
+{
+	struct cam_camera_spi_inst *se = &client->spi_client->cmd_tbl.erase;
+	int rc = 0;
+	uint32_t cur;
+	uint32_t end = addr + size;
+	uint32_t erase_size = client->spi_client->erase_size;
+
+	for (cur = rounddown(addr, erase_size); cur < end; cur += erase_size) {
+		CAM_ERR(CAM_SENSOR, "%s: erasing 0x%x size: %d\n",
+			__func__, cur, erase_size);
+		rc = cam_spi_write_enable(client, addr_type);
+		if (rc < 0)
+			return rc;
+		rc = cam_spi_tx_helper(client, se, cur, NULL, addr_type, 0,
+			NULL, NULL);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "%s: erase failed\n", __func__);
+			return rc;
+		}
+		rc = cam_spi_wait(client, se, addr_type);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "%s: erase timedout\n", __func__);
+			return rc;
+		}
+	}
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
new file mode 100644
index 0000000..6260987
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io/cam_sensor_spi.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_SPI_H_
+#define _CAM_SENSOR_SPI_H_
+
+#include <linux/spi/spi.h>
+#include <linux/cma.h>
+#include <linux/dma-contiguous.h>
+#include <media/cam_sensor.h>
+#include "cam_sensor_i2c.h"
+
+#define MAX_SPI_SIZE 110
+#define SPI_DYNAMIC_ALLOC
+
+struct cam_camera_spi_inst {
+	uint8_t opcode;
+	uint8_t addr_len;
+	uint8_t dummy_len;
+	uint8_t delay_intv;
+	uint8_t delay_count;
+};
+
+struct cam_spi_write_burst_data {
+	u8 data_msb;
+	u8 data_lsb;
+};
+
+struct cam_spi_write_burst_packet {
+	u8 cmd;
+	u8 addr_msb;
+	u8 addr_lsb;
+	struct cam_spi_write_burst_data data_arr[MAX_SPI_SIZE];
+};
+
+struct cam_camera_burst_info {
+	uint32_t burst_addr;
+	uint32_t burst_start;
+	uint32_t burst_len;
+	uint32_t chunk_size;
+};
+
+struct cam_camera_spi_inst_tbl {
+	struct cam_camera_spi_inst read;
+	struct cam_camera_spi_inst read_seq;
+	struct cam_camera_spi_inst query_id;
+	struct cam_camera_spi_inst page_program;
+	struct cam_camera_spi_inst write_enable;
+	struct cam_camera_spi_inst read_status;
+	struct cam_camera_spi_inst erase;
+};
+
+struct cam_sensor_spi_client {
+	struct spi_device *spi_master;
+	struct cam_camera_spi_inst_tbl cmd_tbl;
+	uint8_t device_id0;
+	uint8_t device_id1;
+	uint8_t mfr_id0;
+	uint8_t mfr_id1;
+	uint8_t retry_delay;
+	uint8_t retries;
+	uint8_t busy_mask;
+	uint16_t page_size;
+	uint32_t erase_size;
+};
+
+static __always_inline
+uint16_t cam_camera_spi_get_hlen(struct cam_camera_spi_inst *inst)
+{
+	return sizeof(inst->opcode) + inst->addr_len + inst->dummy_len;
+}
+
+int cam_spi_read(struct camera_io_master *client,
+	uint32_t addr, uint32_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+int32_t cam_spi_read_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type,
+	int32_t num_bytes);
+
+int cam_spi_query_id(struct camera_io_master *client,
+	uint32_t addr,
+	enum camera_sensor_i2c_type addr_type,
+	uint8_t *data, uint32_t num_byte);
+
+int cam_spi_write(struct camera_io_master *client,
+	uint32_t addr, uint32_t data,
+	enum camera_sensor_i2c_type addr_type,
+	enum camera_sensor_i2c_type data_type);
+
+int cam_spi_write_table(struct camera_io_master *client,
+	struct cam_sensor_i2c_reg_setting *write_setting);
+
+int cam_spi_erase(struct camera_io_master *client,
+	uint32_t addr, enum camera_sensor_i2c_type addr_type,
+	uint32_t size);
+
+int32_t cam_spi_write_seq(struct camera_io_master *client,
+	uint32_t addr, uint8_t *data,
+	enum camera_sensor_i2c_type addr_type, uint32_t num_byte);
+#endif
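Before any of these helpers are used, the client's cmd_tbl has to be filled in with the opcodes of the specific part. A minimal sketch for a typical SPI-NOR-style device (the opcodes and geometry are assumptions, not taken from this patch):

	struct cam_sensor_spi_client *sc = io_master->spi_client;

	sc->cmd_tbl.read = (struct cam_camera_spi_inst)
		{ .opcode = 0x03, .addr_len = 3 };              /* READ */
	sc->cmd_tbl.page_program = (struct cam_camera_spi_inst)
		{ .opcode = 0x02, .addr_len = 3,
		  .delay_intv = 1, .delay_count = 5 };          /* PP   */
	sc->cmd_tbl.write_enable = (struct cam_camera_spi_inst)
		{ .opcode = 0x06 };                             /* WREN */
	sc->cmd_tbl.read_status = (struct cam_camera_spi_inst)
		{ .opcode = 0x05 };                             /* RDSR */
	sc->busy_mask  = 0x01;                                  /* WIP  */
	sc->page_size  = 256;
	sc->erase_size = 4096;
	sc->retries = 3;
	sc->retry_delay = 1;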
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
new file mode 100644
index 0000000..091ace1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_io
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_cci
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_sensor_module/cam_res_mgr
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
+
+obj-$(CONFIG_SPECTRA_CAMERA) +=  cam_sensor_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
new file mode 100644
index 0000000..aa4fb99
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_cmn_header.h
@@ -0,0 +1,383 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_CMN_HEADER_
+#define _CAM_SENSOR_CMN_HEADER_
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <media/cam_sensor.h>
+#include <media/cam_req_mgr.h>
+
+#define MAX_REGULATOR 5
+#define MAX_POWER_CONFIG 12
+
+#define MAX_PER_FRAME_ARRAY 32
+#define BATCH_SIZE_MAX      16
+
+#define CAM_SENSOR_NAME    "cam-sensor"
+#define CAM_ACTUATOR_NAME  "cam-actuator"
+#define CAM_CSIPHY_NAME    "cam-csiphy"
+#define CAM_FLASH_NAME     "cam-flash"
+#define CAM_EEPROM_NAME    "cam-eeprom"
+#define CAM_OIS_NAME       "cam-ois"
+
+#define MAX_SYSTEM_PIPELINE_DELAY 2
+
+#define CAM_PKT_NOP_OPCODE 127
+
+enum camera_sensor_cmd_type {
+	CAMERA_SENSOR_CMD_TYPE_INVALID,
+	CAMERA_SENSOR_CMD_TYPE_PROBE,
+	CAMERA_SENSOR_CMD_TYPE_PWR_UP,
+	CAMERA_SENSOR_CMD_TYPE_PWR_DOWN,
+	CAMERA_SENSOR_CMD_TYPE_I2C_INFO,
+	CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR,
+	CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_RD,
+	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR,
+	CAMERA_SENSOR_CMD_TYPE_I2C_CONT_RD,
+	CAMERA_SENSOR_CMD_TYPE_WAIT,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_INFO,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_FIRE,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_RER,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_QUERYCURR,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_WIDGET,
+	CAMERA_SENSOR_CMD_TYPE_RD_DATA,
+	CAMERA_SENSOR_FLASH_CMD_TYPE_INIT_FIRE,
+	CAMERA_SENSOR_CMD_TYPE_MAX,
+};
+
+enum camera_sensor_i2c_op_code {
+	CAMERA_SENSOR_I2C_OP_INVALID,
+	CAMERA_SENSOR_I2C_OP_RNDM_WR,
+	CAMERA_SENSOR_I2C_OP_RNDM_WR_VERF,
+	CAMERA_SENSOR_I2C_OP_CONT_WR_BRST,
+	CAMERA_SENSOR_I2C_OP_CONT_WR_BRST_VERF,
+	CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN,
+	CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN_VERF,
+	CAMERA_SENSOR_I2C_OP_MAX,
+};
+
+enum camera_sensor_wait_op_code {
+	CAMERA_SENSOR_WAIT_OP_INVALID,
+	CAMERA_SENSOR_WAIT_OP_COND,
+	CAMERA_SENSOR_WAIT_OP_HW_UCND,
+	CAMERA_SENSOR_WAIT_OP_SW_UCND,
+	CAMERA_SENSOR_WAIT_OP_MAX,
+};
+
+enum camera_flash_opcode {
+	CAMERA_SENSOR_FLASH_OP_INVALID,
+	CAMERA_SENSOR_FLASH_OP_OFF,
+	CAMERA_SENSOR_FLASH_OP_FIRELOW,
+	CAMERA_SENSOR_FLASH_OP_FIREHIGH,
+	CAMERA_SENSOR_FLASH_OP_MAX,
+};
+
+enum camera_sensor_i2c_type {
+	CAMERA_SENSOR_I2C_TYPE_INVALID,
+	CAMERA_SENSOR_I2C_TYPE_BYTE,
+	CAMERA_SENSOR_I2C_TYPE_WORD,
+	CAMERA_SENSOR_I2C_TYPE_3B,
+	CAMERA_SENSOR_I2C_TYPE_DWORD,
+	CAMERA_SENSOR_I2C_TYPE_MAX,
+};
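Note that the enumerators are ordered so each value equals its width in bytes (BYTE = 1 ... DWORD = 4); the IO helpers rely on this when sizing buffers, and occasionally pass a data_type where a byte count is expected, e.g. in the burst writer:

	/* address header, then one data field per element */
	buf = kzalloc(addr_type + (size * data_type), GFP_KERNEL);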
+
+enum i2c_freq_mode {
+	I2C_STANDARD_MODE,
+	I2C_FAST_MODE,
+	I2C_CUSTOM_MODE,
+	I2C_FAST_PLUS_MODE,
+	I2C_MAX_MODES,
+};
+
+enum position_roll {
+	ROLL_0       = 0,
+	ROLL_90      = 90,
+	ROLL_180     = 180,
+	ROLL_270     = 270,
+	ROLL_INVALID = 360,
+};
+
+enum position_yaw {
+	FRONT_CAMERA_YAW = 0,
+	REAR_CAMERA_YAW  = 180,
+	INVALID_YAW      = 360,
+};
+
+enum position_pitch {
+	LEVEL_PITCH    = 0,
+	INVALID_PITCH  = 360,
+};
+
+enum sensor_sub_module {
+	SUB_MODULE_SENSOR,
+	SUB_MODULE_ACTUATOR,
+	SUB_MODULE_EEPROM,
+	SUB_MODULE_LED_FLASH,
+	SUB_MODULE_CSID,
+	SUB_MODULE_CSIPHY,
+	SUB_MODULE_OIS,
+	SUB_MODULE_EXT,
+	SUB_MODULE_MAX,
+};
+
+enum msm_camera_power_seq_type {
+	SENSOR_MCLK,
+	SENSOR_VANA,
+	SENSOR_VDIG,
+	SENSOR_VIO,
+	SENSOR_VAF,
+	SENSOR_VAF_PWDM,
+	SENSOR_CUSTOM_REG1,
+	SENSOR_CUSTOM_REG2,
+	SENSOR_RESET,
+	SENSOR_STANDBY,
+	SENSOR_CUSTOM_GPIO1,
+	SENSOR_CUSTOM_GPIO2,
+	SENSOR_SEQ_TYPE_MAX,
+};
+
+enum cam_sensor_packet_opcodes {
+	CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMON,
+	CAM_SENSOR_PACKET_OPCODE_SENSOR_UPDATE,
+	CAM_SENSOR_PACKET_OPCODE_SENSOR_INITIAL_CONFIG,
+	CAM_SENSOR_PACKET_OPCODE_SENSOR_PROBE,
+	CAM_SENSOR_PACKET_OPCODE_SENSOR_CONFIG,
+	CAM_SENSOR_PACKET_OPCODE_SENSOR_STREAMOFF,
+	CAM_SENSOR_PACKET_OPCODE_SENSOR_NOP = 127
+};
+
+enum cam_actuator_packet_opcodes {
+	CAM_ACTUATOR_PACKET_OPCODE_INIT,
+	CAM_ACTUATOR_PACKET_AUTO_MOVE_LENS,
+	CAM_ACTUATOR_PACKET_MANUAL_MOVE_LENS
+};
+
+enum cam_eeprom_packet_opcodes {
+	CAM_EEPROM_PACKET_OPCODE_INIT
+};
+
+enum cam_ois_packet_opcodes {
+	CAM_OIS_PACKET_OPCODE_INIT,
+	CAM_OIS_PACKET_OPCODE_OIS_CONTROL
+};
+
+enum msm_bus_perf_setting {
+	S_INIT,
+	S_PREVIEW,
+	S_VIDEO,
+	S_CAPTURE,
+	S_ZSL,
+	S_STEREO_VIDEO,
+	S_STEREO_CAPTURE,
+	S_DEFAULT,
+	S_LIVESHOT,
+	S_DUAL,
+	S_EXIT
+};
+
+enum msm_camera_device_type_t {
+	MSM_CAMERA_I2C_DEVICE,
+	MSM_CAMERA_PLATFORM_DEVICE,
+	MSM_CAMERA_SPI_DEVICE,
+};
+
+enum cam_flash_device_type {
+	CAMERA_FLASH_DEVICE_TYPE_PMIC = 0,
+	CAMERA_FLASH_DEVICE_TYPE_I2C,
+	CAMERA_FLASH_DEVICE_TYPE_GPIO,
+};
+
+enum cci_i2c_master_t {
+	MASTER_0,
+	MASTER_1,
+	MASTER_MAX,
+};
+
+enum cci_device_num {
+	CCI_DEVICE_0,
+	CCI_DEVICE_1,
+	CCI_DEVICE_MAX,
+};
+
+enum camera_vreg_type {
+	VREG_TYPE_DEFAULT,
+	VREG_TYPE_CUSTOM,
+};
+
+enum cam_sensor_i2c_cmd_type {
+	CAM_SENSOR_I2C_WRITE_RANDOM,
+	CAM_SENSOR_I2C_WRITE_BURST,
+	CAM_SENSOR_I2C_WRITE_SEQ,
+	CAM_SENSOR_I2C_READ,
+	CAM_SENSOR_I2C_POLL
+};
+
+struct common_header {
+	uint16_t    first_word;
+	uint8_t     third_byte;
+	uint8_t     cmd_type;
+};
+
+struct camera_vreg_t {
+	const char *reg_name;
+	int min_voltage;
+	int max_voltage;
+	int op_mode;
+	uint32_t delay;
+	const char *custom_vreg_name;
+	enum camera_vreg_type type;
+};
+
+struct msm_camera_gpio_num_info {
+	uint16_t gpio_num[SENSOR_SEQ_TYPE_MAX];
+	uint8_t valid[SENSOR_SEQ_TYPE_MAX];
+};
+
+struct msm_cam_clk_info {
+	const char *clk_name;
+	long clk_rate;
+	uint32_t delay;
+};
+
+struct msm_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	bool use_pinctrl;
+};
+
+struct cam_sensor_i2c_reg_array {
+	uint32_t reg_addr;
+	uint32_t reg_data;
+	uint32_t delay;
+	uint32_t data_mask;
+};
+
+struct cam_sensor_i2c_reg_setting {
+	struct cam_sensor_i2c_reg_array *reg_setting;
+	unsigned short size;
+	enum camera_sensor_i2c_type addr_type;
+	enum camera_sensor_i2c_type data_type;
+	unsigned short delay;
+};
+
+struct i2c_settings_list {
+	struct cam_sensor_i2c_reg_setting i2c_settings;
+	enum cam_sensor_i2c_cmd_type op_code;
+	struct list_head list;
+};
+
+struct i2c_settings_array {
+	struct list_head list_head;
+	int32_t is_settings_valid;
+	int64_t request_id;
+};
+
+struct i2c_data_settings {
+	struct i2c_settings_array init_settings;
+	struct i2c_settings_array config_settings;
+	struct i2c_settings_array streamon_settings;
+	struct i2c_settings_array streamoff_settings;
+	struct i2c_settings_array *per_frame;
+};
+
+struct cam_sensor_power_ctrl_t {
+	struct device *dev;
+	struct cam_sensor_power_setting *power_setting;
+	uint16_t power_setting_size;
+	struct cam_sensor_power_setting *power_down_setting;
+	uint16_t power_down_setting_size;
+	struct msm_camera_gpio_num_info *gpio_num_info;
+	struct msm_pinctrl_info pinctrl_info;
+	uint8_t cam_pinctrl_status;
+};
+
+struct cam_camera_slave_info {
+	uint16_t sensor_slave_addr;
+	uint16_t sensor_id_reg_addr;
+	uint16_t sensor_id;
+	uint16_t sensor_id_mask;
+};
+
+struct msm_sensor_init_params {
+	int modes_supported;
+	unsigned int sensor_mount_angle;
+};
+
+enum msm_sensor_camera_id_t {
+	CAMERA_0,
+	CAMERA_1,
+	CAMERA_2,
+	CAMERA_3,
+	MAX_CAMERAS,
+};
+
+struct msm_sensor_id_info_t {
+	unsigned short sensor_id_reg_addr;
+	unsigned short sensor_id;
+	unsigned short sensor_id_mask;
+};
+
+enum msm_sensor_output_format_t {
+	MSM_SENSOR_BAYER,
+	MSM_SENSOR_YCBCR,
+	MSM_SENSOR_META,
+};
+
+struct cam_sensor_power_setting {
+	enum msm_camera_power_seq_type seq_type;
+	unsigned short seq_val;
+	long config_val;
+	unsigned short delay;
+	void *data[10];
+};
+
+struct cam_sensor_board_info {
+	struct cam_camera_slave_info slave_info;
+	int32_t sensor_mount_angle;
+	int32_t secure_mode;
+	int modes_supported;
+	int32_t pos_roll;
+	int32_t pos_yaw;
+	int32_t pos_pitch;
+	int32_t  subdev_id[SUB_MODULE_MAX];
+	int32_t  subdev_intf[SUB_MODULE_MAX];
+	const char *misc_regulator;
+	struct cam_sensor_power_ctrl_t power_info;
+};
+
+enum msm_camera_vreg_name_t {
+	CAM_VDIG,
+	CAM_VIO,
+	CAM_VANA,
+	CAM_VAF,
+	CAM_V_CUSTOM1,
+	CAM_V_CUSTOM2,
+	CAM_VREG_MAX,
+};
+
+struct msm_camera_gpio_conf {
+	void *cam_gpiomux_conf_tbl;
+	uint8_t cam_gpiomux_conf_tbl_size;
+	struct gpio *cam_gpio_common_tbl;
+	uint8_t cam_gpio_common_tbl_size;
+	struct gpio *cam_gpio_req_tbl;
+	uint8_t cam_gpio_req_tbl_size;
+	uint32_t gpio_no_mux;
+	uint32_t *camera_off_table;
+	uint8_t camera_off_table_size;
+	uint32_t *camera_on_table;
+	uint8_t camera_on_table_size;
+	struct msm_camera_gpio_num_info *gpio_num_info;
+};
+
+#endif /* _CAM_SENSOR_CMN_HEADER_ */
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
new file mode 100644
index 0000000..5dcaa48
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.c
@@ -0,0 +1,1932 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/kernel.h>
+#include "cam_sensor_util.h"
+#include <cam_mem_mgr.h>
+#include "cam_res_mgr_api.h"
+
+#define CAM_SENSOR_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SENSOR_PINCTRL_STATE_DEFAULT "cam_default"
+
+#define VALIDATE_VOLTAGE(min, max, config_val) ((config_val) && \
+	((config_val) >= (min)) && ((config_val) <= (max)))
+
+static struct i2c_settings_list *cam_sensor_get_i2c_ptr(
+	struct i2c_settings_array *i2c_reg_settings, uint32_t size)
+{
+	struct i2c_settings_list *tmp;
+
+	tmp = kzalloc(sizeof(struct i2c_settings_list), GFP_KERNEL);
+	if (tmp == NULL)
+		return NULL;
+
+	list_add_tail(&(tmp->list), &(i2c_reg_settings->list_head));
+
+	tmp->i2c_settings.reg_setting = (struct cam_sensor_i2c_reg_array *)
+		kcalloc(size, sizeof(struct cam_sensor_i2c_reg_array),
+			GFP_KERNEL);
+	if (tmp->i2c_settings.reg_setting == NULL) {
+		list_del(&(tmp->list));
+		kfree(tmp);
+		return NULL;
+	}
+	tmp->i2c_settings.size = size;
+
+	return tmp;
+}
+
+int32_t delete_request(struct i2c_settings_array *i2c_array)
+{
+	struct i2c_settings_list *i2c_list = NULL, *i2c_next = NULL;
+	int32_t rc = 0;
+
+	if (i2c_array == NULL) {
+		CAM_ERR(CAM_SENSOR, "FATAL:: Invalid argument");
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(i2c_list, i2c_next,
+		&(i2c_array->list_head), list) {
+		kfree(i2c_list->i2c_settings.reg_setting);
+		list_del(&(i2c_list->list));
+		kfree(i2c_list);
+	}
+	INIT_LIST_HEAD(&(i2c_array->list_head));
+	i2c_array->is_settings_valid = 0;
+
+	return rc;
+}
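Every list built through cam_sensor_get_i2c_ptr() is expected to be torn down with delete_request() once applied. A sketch of the lifecycle (the s_ctrl/per_frame names are illustrative):

	struct i2c_settings_array *req = &s_ctrl->i2c_data.per_frame[idx];

	INIT_LIST_HEAD(&req->list_head);
	rc = cam_sensor_i2c_command_parser(io_master, req, cmd_desc, num_bufs);
	if (rc < 0)
		goto clean;
	/* ... apply the request ... */
clean:
	delete_request(req); /* frees every node and its reg_setting array */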
+
+int32_t cam_sensor_handle_delay(
+	uint32_t **cmd_buf,
+	uint16_t generic_op_code,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint32_t offset, uint32_t *byte_cnt,
+	struct list_head *list_ptr)
+{
+	int32_t rc = 0;
+	struct cam_cmd_unconditional_wait *cmd_uncond_wait =
+		(struct cam_cmd_unconditional_wait *) *cmd_buf;
+	struct i2c_settings_list *i2c_list = NULL;
+
+	if (list_ptr == NULL) {
+		CAM_ERR(CAM_SENSOR, "Invalid list ptr");
+		return -EINVAL;
+	}
+
+	if (offset > 0) {
+		i2c_list =
+			list_entry(list_ptr, struct i2c_settings_list, list);
+		if (generic_op_code ==
+			CAMERA_SENSOR_WAIT_OP_HW_UCND)
+			i2c_list->i2c_settings.reg_setting[offset - 1].delay =
+				cmd_uncond_wait->delay;
+		else
+			i2c_list->i2c_settings.delay = cmd_uncond_wait->delay;
+		(*cmd_buf) += sizeof(struct cam_cmd_unconditional_wait) /
+			sizeof(uint32_t);
+		(*byte_cnt) += sizeof(struct cam_cmd_unconditional_wait);
+	} else {
+		CAM_ERR(CAM_SENSOR, "Delay Rxed Before any buffer: %d", offset);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int32_t cam_sensor_handle_poll(
+	uint32_t **cmd_buf,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint32_t *byte_cnt, int32_t *offset,
+	struct list_head **list_ptr)
+{
+	struct i2c_settings_list  *i2c_list;
+	int32_t rc = 0;
+	struct cam_cmd_conditional_wait *cond_wait
+		= (struct cam_cmd_conditional_wait *) *cmd_buf;
+
+	i2c_list =
+		cam_sensor_get_i2c_ptr(i2c_reg_settings, 1);
+	if (!i2c_list || !i2c_list->i2c_settings.reg_setting) {
+		CAM_ERR(CAM_SENSOR, "Failed in allocating mem for list");
+		return -ENOMEM;
+	}
+
+	i2c_list->op_code = CAM_SENSOR_I2C_POLL;
+	i2c_list->i2c_settings.data_type =
+		cond_wait->data_type;
+	i2c_list->i2c_settings.addr_type =
+		cond_wait->addr_type;
+	i2c_list->i2c_settings.reg_setting->reg_addr =
+		cond_wait->reg_addr;
+	i2c_list->i2c_settings.reg_setting->reg_data =
+		cond_wait->reg_data;
+	i2c_list->i2c_settings.reg_setting->delay =
+		cond_wait->timeout;
+
+	(*cmd_buf) += sizeof(struct cam_cmd_conditional_wait) /
+		sizeof(uint32_t);
+	(*byte_cnt) += sizeof(struct cam_cmd_conditional_wait);
+
+	*offset = 1;
+	*list_ptr = &(i2c_list->list);
+
+	return rc;
+}
+
+int32_t cam_sensor_handle_random_write(
+	struct cam_cmd_i2c_random_wr *cam_cmd_i2c_random_wr,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint16_t *cmd_length_in_bytes, int32_t *offset,
+	struct list_head **list)
+{
+	struct i2c_settings_list  *i2c_list;
+	int32_t rc = 0, cnt;
+
+	i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
+		cam_cmd_i2c_random_wr->header.count);
+	if (i2c_list == NULL ||
+		i2c_list->i2c_settings.reg_setting == NULL) {
+		CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
+		return -ENOMEM;
+	}
+
+	*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
+		sizeof(struct i2c_random_wr_payload) *
+		(cam_cmd_i2c_random_wr->header.count));
+	i2c_list->op_code = CAM_SENSOR_I2C_WRITE_RANDOM;
+	i2c_list->i2c_settings.addr_type =
+		cam_cmd_i2c_random_wr->header.addr_type;
+	i2c_list->i2c_settings.data_type =
+		cam_cmd_i2c_random_wr->header.data_type;
+
+	for (cnt = 0; cnt < (cam_cmd_i2c_random_wr->header.count);
+		cnt++) {
+		i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
+			cam_cmd_i2c_random_wr->random_wr_payload[cnt].reg_addr;
+		i2c_list->i2c_settings.reg_setting[cnt].reg_data =
+			cam_cmd_i2c_random_wr->random_wr_payload[cnt].reg_data;
+		i2c_list->i2c_settings.reg_setting[cnt].data_mask = 0;
+	}
+	*offset = cnt;
+	*list = &(i2c_list->list);
+
+	return rc;
+}
+
+static int32_t cam_sensor_handle_continuous_write(
+	struct cam_cmd_i2c_continuous_wr *cam_cmd_i2c_continuous_wr,
+	struct i2c_settings_array *i2c_reg_settings,
+	uint16_t *cmd_length_in_bytes, int32_t *offset,
+	struct list_head **list)
+{
+	struct i2c_settings_list *i2c_list;
+	int32_t rc = 0, cnt;
+
+	i2c_list = cam_sensor_get_i2c_ptr(i2c_reg_settings,
+		cam_cmd_i2c_continuous_wr->header.count);
+	if (i2c_list == NULL ||
+		i2c_list->i2c_settings.reg_setting == NULL) {
+		CAM_ERR(CAM_SENSOR, "Failed in allocating i2c_list");
+		return -ENOMEM;
+	}
+
+	*cmd_length_in_bytes = (sizeof(struct i2c_rdwr_header) +
+		sizeof(cam_cmd_i2c_continuous_wr->reg_addr) +
+		sizeof(struct cam_cmd_read) *
+		(cam_cmd_i2c_continuous_wr->header.count));
+	if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_BRST)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_BURST;
+	else if (cam_cmd_i2c_continuous_wr->header.op_code ==
+		CAMERA_SENSOR_I2C_OP_CONT_WR_SEQN)
+		i2c_list->op_code = CAM_SENSOR_I2C_WRITE_SEQ;
+	else
+		return -EINVAL;
+
+	i2c_list->i2c_settings.addr_type =
+		cam_cmd_i2c_continuous_wr->header.addr_type;
+	i2c_list->i2c_settings.data_type =
+		cam_cmd_i2c_continuous_wr->header.data_type;
+	i2c_list->i2c_settings.size =
+		cam_cmd_i2c_continuous_wr->header.count;
+
+	for (cnt = 0; cnt < (cam_cmd_i2c_continuous_wr->header.count);
+		cnt++) {
+		i2c_list->i2c_settings.reg_setting[cnt].reg_addr =
+			cam_cmd_i2c_continuous_wr->reg_addr;
+		i2c_list->i2c_settings.reg_setting[cnt].reg_data =
+			cam_cmd_i2c_continuous_wr->data_read[cnt].reg_data;
+		i2c_list->i2c_settings.reg_setting[cnt].data_mask = 0;
+	}
+	*offset = cnt;
+	*list = &(i2c_list->list);
+
+	return rc;
+}
+
+static int cam_sensor_handle_slave_info(
+	struct camera_io_master *io_master,
+	uint32_t *cmd_buf)
+{
+	int rc = 0;
+	struct cam_cmd_i2c_info *i2c_info = (struct cam_cmd_i2c_info *)cmd_buf;
+
+	if (io_master == NULL || cmd_buf == NULL) {
+		CAM_ERR(CAM_SENSOR, "Invalid args");
+		return -EINVAL;
+	}
+
+	switch (io_master->master_type) {
+	case CCI_MASTER:
+		io_master->cci_client->sid = (i2c_info->slave_addr >> 1);
+		io_master->cci_client->i2c_freq_mode = i2c_info->i2c_freq_mode;
+		break;
+
+	case I2C_MASTER:
+		io_master->client->addr = i2c_info->slave_addr;
+		break;
+
+	case SPI_MASTER:
+		break;
+
+	default:
+		CAM_ERR(CAM_SENSOR, "Invalid master type: %d",
+			io_master->master_type);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * cam_sensor_i2c_command_parser() - parse a CSL CCI packet into
+ * register settings
+ * @io_master:        IO master, updated on I2C_INFO (slave info) commands
+ * @i2c_reg_settings: settings array the parsed commands are appended to
+ * @cmd_desc:         command buffer descriptors to parse
+ * @num_cmd_buffers:  number of entries in @cmd_desc
+ *
+ * Handles multiple I2C RD/WR and WAIT cmd formats in one command
+ * buffer, for example a command buffer of m x RND_WR + 1 x HW_WAIT +
+ * n x RND_WR with num_cmd_buf = 1. Do not expect RD/WR with different
+ * cmd_type and op_code in one command buffer.
+ */
+int cam_sensor_i2c_command_parser(
+	struct camera_io_master *io_master,
+	struct i2c_settings_array *i2c_reg_settings,
+	struct cam_cmd_buf_desc   *cmd_desc,
+	int32_t num_cmd_buffers)
+{
+	int32_t                   rc = 0, i = 0;
+	size_t                    len_of_buff = 0;
+	uintptr_t                  generic_ptr;
+	uint16_t                  cmd_length_in_bytes = 0;
+
+	for (i = 0; i < num_cmd_buffers; i++) {
+		uint32_t                  *cmd_buf = NULL;
+		struct common_header      *cmm_hdr;
+		uint16_t                  generic_op_code;
+		uint32_t                  byte_cnt = 0;
+		uint32_t                  j = 0;
+		struct list_head          *list = NULL;
+
+		/*
+		 * It is not expected the same settings to
+		 * be spread across multiple cmd buffers
+		 */
+		CAM_DBG(CAM_SENSOR, "Total cmd Buf in Bytes: %d",
+			cmd_desc[i].length);
+
+		if (!cmd_desc[i].length)
+			continue;
+
+		rc = cam_mem_get_cpu_buf(cmd_desc[i].mem_handle,
+			&generic_ptr, &len_of_buff);
+		cmd_buf = (uint32_t *)generic_ptr;
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"cmd hdl failed:%d, Err: %d, Buffer_len: %zd",
+				cmd_desc[i].mem_handle, rc, len_of_buff);
+			return rc;
+		}
+		cmd_buf += cmd_desc[i].offset / sizeof(uint32_t);
+
+		while (byte_cnt < cmd_desc[i].length) {
+			cmm_hdr = (struct common_header *)cmd_buf;
+			generic_op_code = cmm_hdr->third_byte;
+			switch (cmm_hdr->cmd_type) {
+			case CAMERA_SENSOR_CMD_TYPE_I2C_RNDM_WR: {
+				uint16_t cmd_length_in_bytes   = 0;
+				struct cam_cmd_i2c_random_wr
+					*cam_cmd_i2c_random_wr =
+					(struct cam_cmd_i2c_random_wr *)cmd_buf;
+
+				rc = cam_sensor_handle_random_write(
+					cam_cmd_i2c_random_wr,
+					i2c_reg_settings,
+					&cmd_length_in_bytes, &j, &list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+					"Failed in random write %d", rc);
+					return rc;
+				}
+
+				cmd_buf += cmd_length_in_bytes /
+					sizeof(uint32_t);
+				byte_cnt += cmd_length_in_bytes;
+				break;
+			}
+			case CAMERA_SENSOR_CMD_TYPE_I2C_CONT_WR: {
+				uint16_t cmd_length_in_bytes   = 0;
+				struct cam_cmd_i2c_continuous_wr
+				*cam_cmd_i2c_continuous_wr =
+				(struct cam_cmd_i2c_continuous_wr *)
+				cmd_buf;
+
+				rc = cam_sensor_handle_continuous_write(
+					cam_cmd_i2c_continuous_wr,
+					i2c_reg_settings,
+					&cmd_length_in_bytes, &j, &list);
+				if (rc < 0) {
+					CAM_ERR(CAM_SENSOR,
+					"Failed in continuous write %d", rc);
+					return rc;
+				}
+
+				cmd_buf += cmd_length_in_bytes /
+					sizeof(uint32_t);
+				byte_cnt += cmd_length_in_bytes;
+				break;
+			}
+			case CAMERA_SENSOR_CMD_TYPE_WAIT: {
+				if (generic_op_code ==
+					CAMERA_SENSOR_WAIT_OP_HW_UCND ||
+					generic_op_code ==
+						CAMERA_SENSOR_WAIT_OP_SW_UCND) {
+
+					rc = cam_sensor_handle_delay(
+						&cmd_buf, generic_op_code,
+						i2c_reg_settings, j, &byte_cnt,
+						list);
+					if (rc < 0) {
+						CAM_ERR(CAM_SENSOR,
+							"delay hdl failed: %d",
+							rc);
+						return rc;
+					}
+
+				} else if (generic_op_code ==
+					CAMERA_SENSOR_WAIT_OP_COND) {
+					rc = cam_sensor_handle_poll(
+						&cmd_buf, i2c_reg_settings,
+						&byte_cnt, &j, &list);
+					if (rc < 0) {
+						CAM_ERR(CAM_SENSOR,
+							"Random read fail: %d",
+							rc);
+						return rc;
+					}
+				} else {
+					CAM_ERR(CAM_SENSOR,
+						"Wrong Wait Command: %d",
+						generic_op_code);
+					return -EINVAL;
+				}
+				break;
+			}
+			case CAMERA_SENSOR_CMD_TYPE_I2C_INFO: {
+				rc = cam_sensor_handle_slave_info(
+					io_master, cmd_buf);
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+						"Handle slave info failed with rc: %d",
+						rc);
+					return rc;
+				}
+				cmd_length_in_bytes =
+					sizeof(struct cam_cmd_i2c_info);
+				cmd_buf +=
+					cmd_length_in_bytes / sizeof(uint32_t);
+				byte_cnt += cmd_length_in_bytes;
+				break;
+			}
+			default:
+				CAM_ERR(CAM_SENSOR, "Invalid Command Type:%d",
+					 cmm_hdr->cmd_type);
+				return -EINVAL;
+			}
+		}
+		i2c_reg_settings->is_settings_valid = 1;
+	}
+
+	return rc;
+}
+
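+/*
+ * Apply one parsed i2c_settings_list entry to the hardware: random
+ * writes go through camera_io_dev_write(), sequential and burst
+ * writes through camera_io_dev_write_continuous(), and poll entries
+ * are checked register by register via camera_io_dev_poll().
+ */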
+int cam_sensor_util_i2c_apply_setting(
+	struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list)
+{
+	int32_t rc = 0;
+	uint32_t i, size;
+
+	switch (i2c_list->op_code) {
+	case CAM_SENSOR_I2C_WRITE_RANDOM: {
+		rc = camera_io_dev_write(io_master_info,
+			&(i2c_list->i2c_settings));
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to random write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_WRITE_SEQ: {
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 0);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to seq write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_WRITE_BURST: {
+		rc = camera_io_dev_write_continuous(
+			io_master_info, &(i2c_list->i2c_settings), 1);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"Failed to burst write I2C settings: %d",
+				rc);
+			return rc;
+		}
+		break;
+	}
+	case CAM_SENSOR_I2C_POLL: {
+		size = i2c_list->i2c_settings.size;
+		for (i = 0; i < size; i++) {
+			rc = camera_io_dev_poll(
+			io_master_info,
+			i2c_list->i2c_settings.reg_setting[i].reg_addr,
+			i2c_list->i2c_settings.reg_setting[i].reg_data,
+			i2c_list->i2c_settings.reg_setting[i].data_mask,
+			i2c_list->i2c_settings.addr_type,
+			i2c_list->i2c_settings.data_type,
+			i2c_list->i2c_settings.reg_setting[i].delay);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"i2c poll apply setting Fail: %d", rc);
+				return rc;
+			}
+		}
+		break;
+	}
+	default:
+		CAM_ERR(CAM_SENSOR, "Wrong Opcode: %d", i2c_list->op_code);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
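+/*
+ * For every power setting, find the regulator whose DT name matches
+ * the sequence type (e.g. SENSOR_VDIG -> "cam_vdig") and store its
+ * index in seq_val. If config_val lies inside the DT voltage range,
+ * the regulator min/max voltages are pinned to it; settings with no
+ * matching regulator are marked INVALID_VREG.
+ */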
+int32_t msm_camera_fill_vreg_params(
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_setting *power_setting,
+	uint16_t power_setting_size)
+{
+	int32_t rc = 0, j = 0, i = 0;
+	int num_vreg;
+
+	/* Validate input parameters */
+	if (!soc_info || !power_setting) {
+		CAM_ERR(CAM_SENSOR, "failed: soc_info %pK power_setting %pK",
+			soc_info, power_setting);
+		return -EINVAL;
+	}
+
+	num_vreg = soc_info->num_rgltr;
+
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < power_setting_size; i++) {
+
+		if (power_setting[i].seq_type < SENSOR_MCLK ||
+			power_setting[i].seq_type >= SENSOR_SEQ_TYPE_MAX) {
+			CAM_ERR(CAM_SENSOR, "failed: Invalid Seq type: %d",
+				power_setting[i].seq_type);
+			return -EINVAL;
+		}
+
+		switch (power_setting[i].seq_type) {
+		case SENSOR_VDIG:
+			for (j = 0; j < num_vreg; j++) {
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vdig")) {
+
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vdig", i, j);
+					power_setting[i].seq_val = j;
+
+					if (VALIDATE_VOLTAGE(
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
+						power_setting[i].config_val)) {
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
+						power_setting[i].config_val;
+					}
+					break;
+				}
+			}
+			if (j == num_vreg)
+				power_setting[i].seq_val = INVALID_VREG;
+			break;
+
+		case SENSOR_VIO:
+			for (j = 0; j < num_vreg; j++) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vio")) {
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vio", i, j);
+					power_setting[i].seq_val = j;
+
+					if (VALIDATE_VOLTAGE(
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
+						power_setting[i].config_val)) {
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
+						power_setting[i].config_val;
+					}
+					break;
+				}
+
+			}
+			if (j == num_vreg)
+				power_setting[i].seq_val = INVALID_VREG;
+			break;
+
+		case SENSOR_VANA:
+			for (j = 0; j < num_vreg; j++) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vana")) {
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vana", i, j);
+					power_setting[i].seq_val = j;
+
+					if (VALIDATE_VOLTAGE(
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
+						power_setting[i].config_val)) {
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
+						power_setting[i].config_val;
+					}
+					break;
+				}
+
+			}
+			if (j == num_vreg)
+				power_setting[i].seq_val = INVALID_VREG;
+			break;
+
+		case SENSOR_VAF:
+			for (j = 0; j < num_vreg; j++) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_vaf")) {
+					CAM_DBG(CAM_SENSOR,
+						"i: %d j: %d cam_vaf", i, j);
+					power_setting[i].seq_val = j;
+
+					if (VALIDATE_VOLTAGE(
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
+						power_setting[i].config_val)) {
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
+						power_setting[i].config_val;
+					}
+
+					break;
+				}
+
+			}
+			if (j == num_vreg)
+				power_setting[i].seq_val = INVALID_VREG;
+			break;
+
+		case SENSOR_CUSTOM_REG1:
+			for (j = 0; j < num_vreg; j++) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_v_custom1")) {
+					CAM_DBG(CAM_SENSOR,
+						"i:%d j:%d cam_vcustom1", i, j);
+					power_setting[i].seq_val = j;
+
+					if (VALIDATE_VOLTAGE(
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
+						power_setting[i].config_val)) {
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
+						power_setting[i].config_val;
+					}
+					break;
+				}
+
+			}
+			if (j == num_vreg)
+				power_setting[i].seq_val = INVALID_VREG;
+			break;
+		case SENSOR_CUSTOM_REG2:
+			for (j = 0; j < num_vreg; j++) {
+
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_v_custom2")) {
+					CAM_DBG(CAM_SENSOR,
+						"i:%d j:%d cam_vcustom2", i, j);
+					power_setting[i].seq_val = j;
+
+					if (VALIDATE_VOLTAGE(
+						soc_info->rgltr_min_volt[j],
+						soc_info->rgltr_max_volt[j],
+						power_setting[i].config_val)) {
+						soc_info->rgltr_min_volt[j] =
+						soc_info->rgltr_max_volt[j] =
+						power_setting[i].config_val;
+					}
+					break;
+				}
+			}
+			if (j == num_vreg)
+				power_setting[i].seq_val = INVALID_VREG;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return rc;
+}
+
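+/*
+ * Request (gpio_en != 0) or free (gpio_en == 0) every GPIO in the
+ * soc_info request table. A failed request is only logged so that
+ * the remaining GPIOs can still be processed during bring-up.
+ */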
+int cam_sensor_util_request_gpio_table(
+		struct cam_hw_soc_info *soc_info, int gpio_en)
+{
+	int rc = 0, i = 0;
+	uint8_t size = 0;
+	struct cam_soc_gpio_data *gpio_conf =
+			soc_info->gpio_data;
+	struct gpio *gpio_tbl = NULL;
+
+	if (!gpio_conf) {
+		CAM_INFO(CAM_SENSOR, "No GPIO data");
+		return 0;
+	}
+
+	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+		CAM_INFO(CAM_SENSOR, "No GPIO entry");
+		return -EINVAL;
+	}
+
+	gpio_tbl = gpio_conf->cam_gpio_req_tbl;
+	size = gpio_conf->cam_gpio_req_tbl_size;
+
+	if (!gpio_tbl || !size) {
+		CAM_ERR(CAM_SENSOR, "invalid gpio_tbl %pK / size %d",
+			gpio_tbl, size);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < size; i++) {
+		CAM_DBG(CAM_SENSOR, "i: %d, gpio %d dir %ld",  i,
+			gpio_tbl[i].gpio, gpio_tbl[i].flags);
+	}
+
+	if (gpio_en) {
+		for (i = 0; i < size; i++) {
+			rc = cam_res_mgr_gpio_request(soc_info->dev,
+					gpio_tbl[i].gpio,
+					gpio_tbl[i].flags, gpio_tbl[i].label);
+			if (rc) {
+				/*
+				 * If a GPIO request fails, continue to
+				 * apply the remaining GPIOs, and output
+				 * an error message for bring-up debug.
+				 */
+				CAM_ERR(CAM_SENSOR, "gpio %d:%s request fails",
+					gpio_tbl[i].gpio, gpio_tbl[i].label);
+			}
+		}
+	} else {
+		cam_res_mgr_gpio_free_arry(soc_info->dev, gpio_tbl, size);
+	}
+
+	return rc;
+}
+
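+/*
+ * Parse a userspace power command buffer into the power up and power
+ * down setting arrays. PWR_UP and PWR_DOWN blocks append entries,
+ * while an unconditional WAIT adds its delay to the last entry of
+ * the preceding block. Both arrays are freed on any parse error.
+ */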
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+	int cmd_length, struct cam_sensor_power_ctrl_t *power_info)
+{
+	int32_t rc = 0, tot_size = 0, last_cmd_type = 0;
+	int32_t i = 0, pwr_up = 0, pwr_down = 0;
+	struct cam_sensor_power_setting *pwr_settings;
+	void *ptr = cmd_buf, *scr;
+	struct cam_cmd_power *pwr_cmd = (struct cam_cmd_power *)cmd_buf;
+	struct common_header *cmm_hdr = (struct common_header *)cmd_buf;
+
+	if (!pwr_cmd || !cmd_length) {
+		CAM_ERR(CAM_SENSOR, "Invalid Args: pwr_cmd %pK, cmd_length: %d",
+			pwr_cmd, cmd_length);
+		return -EINVAL;
+	}
+
+	power_info->power_setting_size = 0;
+	power_info->power_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_setting)
+		return -ENOMEM;
+
+	power_info->power_down_setting_size = 0;
+	power_info->power_down_setting =
+		kzalloc(sizeof(struct cam_sensor_power_setting) *
+			MAX_POWER_CONFIG, GFP_KERNEL);
+	if (!power_info->power_down_setting) {
+		kfree(power_info->power_setting);
+		power_info->power_setting = NULL;
+		power_info->power_setting_size = 0;
+		return -ENOMEM;
+	}
+
+	while (tot_size < cmd_length) {
+		if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_UP) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			power_info->power_setting_size += pwr_cmd->count;
+			if (power_info->power_setting_size > MAX_POWER_CONFIG) {
+				CAM_ERR(CAM_SENSOR,
+					"Invalid: power up setting size %d",
+					power_info->power_setting_size);
+				rc = -EINVAL;
+				goto free_power_settings;
+			}
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+
+			if (pwr_cmd->count == 0)
+				CAM_WARN(CAM_SENSOR, "pwr_up_size is zero");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_up++) {
+				power_info->power_setting[pwr_up].seq_type =
+				pwr_cmd->power_settings[i].power_seq_type;
+				power_info->power_setting[pwr_up].config_val =
+				pwr_cmd->power_settings[i].config_val_low;
+				power_info->power_setting[pwr_up].delay = 0;
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size = tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Error: Cmd Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+				"Seq Type[%d]: %d Config_val: %ld", pwr_up,
+				power_info->power_setting[pwr_up].seq_type,
+				power_info->power_setting[pwr_up].config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_UP;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type == CAMERA_SENSOR_CMD_TYPE_WAIT) {
+			struct cam_cmd_unconditional_wait *wait_cmd =
+				(struct cam_cmd_unconditional_wait *)ptr;
+			if ((wait_cmd->op_code ==
+				CAMERA_SENSOR_WAIT_OP_SW_UCND) &&
+				(last_cmd_type ==
+				CAMERA_SENSOR_CMD_TYPE_PWR_UP)) {
+				if (pwr_up > 0) {
+					pwr_settings =
+					&power_info->power_setting[pwr_up - 1];
+					pwr_settings->delay +=
+						wait_cmd->delay;
+				} else {
+					CAM_ERR(CAM_SENSOR,
+					"Delay is expected only after valid power up setting");
+				}
+			} else if ((wait_cmd->op_code ==
+				CAMERA_SENSOR_WAIT_OP_SW_UCND) &&
+				(last_cmd_type ==
+				CAMERA_SENSOR_CMD_TYPE_PWR_DOWN)) {
+				if (pwr_down > 0) {
+					pwr_settings =
+					&power_info->power_down_setting[
+						pwr_down - 1];
+					pwr_settings->delay +=
+						wait_cmd->delay;
+				} else {
+					CAM_ERR(CAM_SENSOR,
+					"Delay is expected only after valid power down setting");
+				}
+			} else {
+				CAM_DBG(CAM_SENSOR, "Invalid op code: %d",
+					wait_cmd->op_code);
+			}
+
+			tot_size = tot_size +
+				sizeof(struct cam_cmd_unconditional_wait);
+			if (tot_size > cmd_length) {
+				CAM_ERR(CAM_SENSOR, "Command Buffer is wrong");
+				rc = -EINVAL;
+				goto free_power_settings;
+			}
+			scr = (void *) (wait_cmd);
+			ptr = (void *)
+				(scr +
+				sizeof(struct cam_cmd_unconditional_wait));
+			CAM_DBG(CAM_SENSOR, "ptr: %pK sizeof: %d Next: %pK",
+				scr, (int32_t)sizeof(
+				struct cam_cmd_unconditional_wait), ptr);
+
+			cmm_hdr = (struct common_header *)ptr;
+		} else if (cmm_hdr->cmd_type ==
+			CAMERA_SENSOR_CMD_TYPE_PWR_DOWN) {
+			struct cam_cmd_power *pwr_cmd =
+				(struct cam_cmd_power *)ptr;
+
+			scr = ptr + sizeof(struct cam_cmd_power);
+			tot_size = tot_size + sizeof(struct cam_cmd_power);
+			power_info->power_down_setting_size += pwr_cmd->count;
+			if (power_info->power_down_setting_size >
+				MAX_POWER_CONFIG) {
+				CAM_ERR(CAM_SENSOR,
+					"Invalid: power down setting size %d",
+					power_info->power_down_setting_size);
+				rc = -EINVAL;
+				goto free_power_settings;
+			}
+
+			if (pwr_cmd->count == 0)
+				CAM_ERR(CAM_SENSOR, "pwr_down size is zero");
+
+			for (i = 0; i < pwr_cmd->count; i++, pwr_down++) {
+				pwr_settings =
+				&power_info->power_down_setting[pwr_down];
+				pwr_settings->seq_type =
+				pwr_cmd->power_settings[i].power_seq_type;
+				pwr_settings->config_val =
+				pwr_cmd->power_settings[i].config_val_low;
+				power_info->power_down_setting[pwr_down].delay
+					= 0;
+				if (i) {
+					scr = scr +
+						sizeof(
+						struct cam_power_settings);
+					tot_size =
+						tot_size +
+						sizeof(
+						struct cam_power_settings);
+				}
+				if (tot_size > cmd_length) {
+					CAM_ERR(CAM_SENSOR,
+						"Command Buffer is wrong");
+					rc = -EINVAL;
+					goto free_power_settings;
+				}
+				CAM_DBG(CAM_SENSOR,
+					"Seq Type[%d]: %d Config_val: %ld",
+					pwr_down, pwr_settings->seq_type,
+					pwr_settings->config_val);
+			}
+			last_cmd_type = CAMERA_SENSOR_CMD_TYPE_PWR_DOWN;
+			ptr = (void *) scr;
+			cmm_hdr = (struct common_header *)ptr;
+		} else {
+			CAM_ERR(CAM_SENSOR,
+				"Error: Unexpected Header Type: %d",
+				cmm_hdr->cmd_type);
+			rc = -EINVAL;
+			goto free_power_settings;
+		}
+	}
+
+	return rc;
+free_power_settings:
+	kfree(power_info->power_down_setting);
+	kfree(power_info->power_setting);
+	power_info->power_down_setting = NULL;
+	power_info->power_setting = NULL;
+	power_info->power_down_setting_size = 0;
+	power_info->power_setting_size = 0;
+	return rc;
+}
+
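+/*
+ * Build the power up sequence from the qcom,cam-power-seq-* DT
+ * properties (one type, config value and delay per entry). The power
+ * down sequence is the power up sequence in reverse order.
+ */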
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_ctrl_t *power_info)
+{
+	int rc = 0, i;
+	int count = 0;
+	const char *seq_name = NULL;
+	uint32_t *array = NULL;
+	struct cam_sensor_power_setting *ps;
+	int c, end;
+
+	if (!power_info)
+		return -EINVAL;
+
+	count = of_property_count_strings(of_node, "qcom,cam-power-seq-type");
+	power_info->power_setting_size = count;
+
+	CAM_DBG(CAM_SENSOR, "qcom,cam-power-seq-type count %d", count);
+
+	if (count <= 0)
+		return 0;
+
+	ps = kcalloc(count, sizeof(*ps), GFP_KERNEL);
+	if (!ps)
+		return -ENOMEM;
+	power_info->power_setting = ps;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"qcom,cam-power-seq-type", i, &seq_name);
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"failed to read qcom,cam-power-seq-type");
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_name[%d] = %s", i, seq_name);
+		if (!strcmp(seq_name, "cam_vio")) {
+			ps[i].seq_type = SENSOR_VIO;
+		} else if (!strcmp(seq_name, "cam_vana")) {
+			ps[i].seq_type = SENSOR_VANA;
+		} else if (!strcmp(seq_name, "cam_clk")) {
+			ps[i].seq_type = SENSOR_MCLK;
+		} else {
+			CAM_ERR(CAM_SENSOR, "unrecognized seq-type %s",
+				seq_name);
+			rc = -EILSEQ;
+			goto ERROR1;
+		}
+		CAM_DBG(CAM_SENSOR, "seq_type[%d] %d", i, ps[i].seq_type);
+	}
+
+	array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!array) {
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-cfg-val",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR,
+			"failed to read qcom,cam-power-seq-cfg-val");
+		goto ERROR2;
+	}
+
+	for (i = 0; i < count; i++) {
+		ps[i].config_val = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].config_val = %ld", i,
+			ps[i].config_val);
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,cam-power-seq-delay",
+		array, count);
+	if (rc < 0) {
+		CAM_ERR(CAM_SENSOR,
+			"failed to read qcom,cam-power-seq-delay");
+		goto ERROR2;
+	}
+	for (i = 0; i < count; i++) {
+		ps[i].delay = array[i];
+		CAM_DBG(CAM_SENSOR, "power_setting[%d].delay = %d", i,
+			ps[i].delay);
+	}
+	kfree(array);
+
+	power_info->power_down_setting =
+		kcalloc(count, sizeof(*ps), GFP_KERNEL);
+
+	if (!power_info->power_down_setting) {
+		CAM_ERR(CAM_SENSOR,
+			"failed to alloc power down settings");
+		rc = -ENOMEM;
+		goto ERROR1;
+	}
+
+	power_info->power_down_setting_size = count;
+
+	end = count - 1;
+
+	for (c = 0; c < count; c++) {
+		power_info->power_down_setting[c] = ps[end];
+		end--;
+	}
+	return rc;
+ERROR2:
+	kfree(array);
+ERROR1:
+	kfree(ps);
+	return rc;
+}
+
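+/*
+ * Allocate *pgpio_num_info and populate it from the optional gpio-*
+ * DT properties, each of which holds an index into the common GPIO
+ * table. Absent properties are skipped; out-of-range values fail.
+ */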
+int cam_sensor_util_init_gpio_pin_tbl(
+	struct cam_hw_soc_info *soc_info,
+	struct msm_camera_gpio_num_info **pgpio_num_info)
+{
+	int rc = 0, val = 0;
+	uint32_t gpio_array_size;
+	struct device_node *of_node = NULL;
+	struct cam_soc_gpio_data *gconf = NULL;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+	if (!soc_info->dev) {
+		CAM_ERR(CAM_SENSOR, "device node NULL");
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	gconf = soc_info->gpio_data;
+	if (!gconf) {
+		CAM_ERR(CAM_SENSOR, "No gpio_common_table is found");
+		return -EINVAL;
+	}
+
+	if (!gconf->cam_gpio_common_tbl) {
+		CAM_ERR(CAM_SENSOR, "gpio_common_table is not initialized");
+		return -EINVAL;
+	}
+
+	gpio_array_size = gconf->cam_gpio_common_tbl_size;
+
+	if (!gpio_array_size) {
+		CAM_ERR(CAM_SENSOR, "invalid size of gpio table");
+		return -EINVAL;
+	}
+
+	*pgpio_num_info = kzalloc(sizeof(struct msm_camera_gpio_num_info),
+		GFP_KERNEL);
+	if (!*pgpio_num_info)
+		return -ENOMEM;
+	gpio_num_info = *pgpio_num_info;
+
+	rc = of_property_read_u32(of_node, "gpio-vana", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vana failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vana invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VANA] =
+				gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VANA] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vana %d",
+			gpio_num_info->gpio_num[SENSOR_VANA]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-vio", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vio failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vio invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VIO] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VIO] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vio %d",
+			gpio_num_info->gpio_num[SENSOR_VIO]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-vaf", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vaf failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vaf invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VAF] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VAF] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vaf %d",
+			gpio_num_info->gpio_num[SENSOR_VAF]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-vdig", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-vdig failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-vdig invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VDIG] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VDIG] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-vdig %d",
+				gpio_num_info->gpio_num[SENSOR_VDIG]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-reset", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR, "read gpio-reset failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-reset invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_RESET] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_RESET] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-reset %d",
+			gpio_num_info->gpio_num[SENSOR_RESET]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-standby", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-standby failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-standby invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_STANDBY] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_STANDBY] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-standby %d",
+			gpio_num_info->gpio_num[SENSOR_STANDBY]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-af-pwdm", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-af-pwdm failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-af-pwdm invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_VAF_PWDM] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_VAF_PWDM] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-af-pwdm %d",
+			gpio_num_info->gpio_num[SENSOR_VAF_PWDM]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-custom1", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-custom1 failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-custom1 invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_CUSTOM_GPIO1] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-custom1 %d",
+			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO1]);
+	}
+
+	rc = of_property_read_u32(of_node, "gpio-custom2", &val);
+	if (rc != -EINVAL) {
+		if (rc < 0) {
+			CAM_ERR(CAM_SENSOR,
+				"read gpio-custom2 failed rc %d", rc);
+			goto free_gpio_info;
+		} else if (val >= gpio_array_size) {
+			CAM_ERR(CAM_SENSOR, "gpio-custom2 invalid %d", val);
+			rc = -EINVAL;
+			goto free_gpio_info;
+		}
+		gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2] =
+			gconf->cam_gpio_common_tbl[val].gpio;
+		gpio_num_info->valid[SENSOR_CUSTOM_GPIO2] = 1;
+
+		CAM_DBG(CAM_SENSOR, "gpio-custom2 %d",
+			gpio_num_info->gpio_num[SENSOR_CUSTOM_GPIO2]);
+	} else {
+		rc = 0;
+	}
+
+	return rc;
+
+free_gpio_info:
+	kfree(gpio_num_info);
+	gpio_num_info = NULL;
+	*pgpio_num_info = NULL;
+	return rc;
+}
+
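+/*
+ * Acquire the sensor pinctrl handle and look up the default (active)
+ * and sleep (suspend) states. A missing handle returns -EINVAL,
+ * which callers may treat as "no pinctrl" rather than fatal.
+ */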
+int msm_camera_pinctrl_init(
+	struct msm_pinctrl_info *sensor_pctrl, struct device *dev)
+{
+	sensor_pctrl->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(sensor_pctrl->pinctrl)) {
+		CAM_DBG(CAM_SENSOR, "Getting pinctrl handle failed");
+		return -EINVAL;
+	}
+	sensor_pctrl->gpio_state_active =
+		pinctrl_lookup_state(sensor_pctrl->pinctrl,
+				CAM_SENSOR_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_active)) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to get the active state pinctrl handle");
+		return -EINVAL;
+	}
+	sensor_pctrl->gpio_state_suspend
+		= pinctrl_lookup_state(sensor_pctrl->pinctrl,
+				CAM_SENSOR_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(sensor_pctrl->gpio_state_suspend)) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to get the suspend state pinctrl handle");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
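+/*
+ * Switch the BoB regulator PWM mode by setting its load: the DT
+ * operating current when flag is true, zero when flag is false.
+ */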
+int cam_sensor_bob_pwm_mode_switch(struct cam_hw_soc_info *soc_info,
+	int bob_reg_idx, bool flag)
+{
+	int rc = 0;
+	uint32_t op_current =
+		flag ? soc_info->rgltr_op_mode[bob_reg_idx] : 0;
+
+	if (soc_info->rgltr[bob_reg_idx] != NULL) {
+		rc = regulator_set_load(soc_info->rgltr[bob_reg_idx],
+			op_current);
+		if (rc)
+			CAM_WARN(CAM_SENSOR,
+				"BoB PWM SetLoad failed rc: %d", rc);
+	}
+
+	return rc;
+}
+
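+/*
+ * Drive the GPIO associated with a regulator sequence type, if one
+ * was marked valid in gpio_num_info; otherwise this is a no-op.
+ */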
+int msm_cam_sensor_handle_reg_gpio(int seq_type,
+	struct msm_camera_gpio_num_info *gpio_num_info, int val)
+{
+	int gpio_offset = -1;
+
+	if (!gpio_num_info) {
+		CAM_INFO(CAM_SENSOR, "Input Parameters are not proper");
+		return 0;
+	}
+
+	CAM_DBG(CAM_SENSOR, "Seq type: %d, config: %d", seq_type, val);
+
+	gpio_offset = seq_type;
+
+	if (gpio_num_info->valid[gpio_offset] == 1) {
+		CAM_DBG(CAM_SENSOR, "VALID GPIO offset: %d, seqtype: %d",
+			 gpio_offset, seq_type);
+		cam_res_mgr_gpio_set_value(
+			gpio_num_info->gpio_num
+			[gpio_offset], val);
+	}
+
+	return 0;
+}
+
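+/*
+ * Disable and release the "cam_clk" regulator backing the MCLK for
+ * the power down entry at @index, but only when a power up entry
+ * with the same sequence type exists.
+ */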
+static int cam_config_mclk_reg(struct cam_sensor_power_ctrl_t *ctrl,
+	struct cam_hw_soc_info *soc_info, int32_t index)
+{
+	int32_t num_vreg = 0, j = 0, rc = 0, idx = 0;
+	struct cam_sensor_power_setting *ps = NULL;
+	struct cam_sensor_power_setting *pd = NULL;
+
+	num_vreg = soc_info->num_rgltr;
+
+	pd = &ctrl->power_down_setting[index];
+
+	for (j = 0; j < num_vreg; j++) {
+		if (!strcmp(soc_info->rgltr_name[j], "cam_clk")) {
+			ps = NULL;
+			for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+				if (ctrl->power_setting[idx].seq_type ==
+					pd->seq_type) {
+					ps = &ctrl->power_setting[idx];
+					break;
+				}
+			}
+
+			if (ps != NULL) {
+				CAM_DBG(CAM_SENSOR, "Disable MCLK Regulator");
+				rc = cam_soc_util_regulator_disable(
+					soc_info->rgltr[j],
+					soc_info->rgltr_name[j],
+					soc_info->rgltr_min_volt[j],
+					soc_info->rgltr_max_volt[j],
+					soc_info->rgltr_op_mode[j],
+					soc_info->rgltr_delay[j]);
+
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+						"MCLK REG DISABLE FAILED: %d",
+						rc);
+					return rc;
+				}
+
+				ps->data[0] =
+					soc_info->rgltr[j];
+
+				regulator_put(
+					soc_info->rgltr[j]);
+				soc_info->rgltr[j] = NULL;
+			}
+		}
+	}
+
+	return rc;
+}
+
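+/*
+ * Walk the power up sequence in order: enable the MCLK regulator and
+ * clocks, drive reset/standby/custom GPIOs, and enable the remaining
+ * regulators, honouring the per-entry delay. On failure, the entries
+ * already applied are unwound in reverse order.
+ */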
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0, index = 0, no_gpio = 0, ret = 0, num_vreg, j = 0, i = 0;
+	int32_t vreg_idx = -1;
+	struct cam_sensor_power_setting *power_setting = NULL;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+	CAM_DBG(CAM_SENSOR, "Enter");
+	if (!ctrl) {
+		CAM_ERR(CAM_SENSOR, "Invalid ctrl handle");
+		return -EINVAL;
+	}
+
+	gpio_num_info = ctrl->gpio_num_info;
+	num_vreg = soc_info->num_rgltr;
+
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
+	if (soc_info->use_shared_clk)
+		cam_res_mgr_shared_clk_config(true);
+
+	ret = msm_camera_pinctrl_init(&(ctrl->pinctrl_info), ctrl->dev);
+	if (ret < 0) {
+		/* Some sensor subdevs have no pinctrl. */
+		CAM_DBG(CAM_SENSOR, "Initialization of pinctrl failed");
+		ctrl->cam_pinctrl_status = 0;
+	} else {
+		ctrl->cam_pinctrl_status = 1;
+	}
+
+	if (cam_res_mgr_shared_pinctrl_init()) {
+		CAM_ERR(CAM_SENSOR,
+			"Failed to init shared pinctrl");
+		return -EINVAL;
+	}
+
+	rc = cam_sensor_util_request_gpio_table(soc_info, 1);
+	if (rc < 0)
+		no_gpio = rc;
+
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(
+			ctrl->pinctrl_info.pinctrl,
+			ctrl->pinctrl_info.gpio_state_active);
+		if (ret)
+			CAM_ERR(CAM_SENSOR, "cannot set pin to active state");
+	}
+
+	ret = cam_res_mgr_shared_pinctrl_select_state(true);
+	if (ret)
+		CAM_ERR(CAM_SENSOR,
+			"Cannot set shared pin to active state");
+
+	CAM_DBG(CAM_SENSOR, "power setting size: %d",
+		ctrl->power_setting_size);
+
+	for (index = 0; index < ctrl->power_setting_size; index++) {
+		CAM_DBG(CAM_SENSOR, "index: %d", index);
+		power_setting = &ctrl->power_setting[index];
+		if (!power_setting) {
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power up settings for index %d",
+				index);
+			return -EINVAL;
+		}
+
+		CAM_DBG(CAM_SENSOR, "seq_type %d", power_setting->seq_type);
+
+		switch (power_setting->seq_type) {
+		case SENSOR_MCLK:
+			if (power_setting->seq_val >= soc_info->num_clk) {
+				CAM_ERR(CAM_SENSOR, "clk index %d >= max %u",
+					power_setting->seq_val,
+					soc_info->num_clk);
+				goto power_up_failed;
+			}
+			for (j = 0; j < num_vreg; j++) {
+				if (!strcmp(soc_info->rgltr_name[j],
+					"cam_clk")) {
+					CAM_DBG(CAM_SENSOR,
+						"Enable cam_clk: %d", j);
+
+					soc_info->rgltr[j] =
+					regulator_get(
+						soc_info->dev,
+						soc_info->rgltr_name[j]);
+
+					if (IS_ERR_OR_NULL(
+						soc_info->rgltr[j])) {
+						rc = PTR_ERR(
+							soc_info->rgltr[j]);
+						rc = rc ? rc : -EINVAL;
+						CAM_ERR(CAM_SENSOR,
+							"vreg %s %d",
+							soc_info->rgltr_name[j],
+							rc);
+						soc_info->rgltr[j] = NULL;
+						goto power_up_failed;
+					}
+
+					rc =  cam_soc_util_regulator_enable(
+					soc_info->rgltr[j],
+					soc_info->rgltr_name[j],
+					soc_info->rgltr_min_volt[j],
+					soc_info->rgltr_max_volt[j],
+					soc_info->rgltr_op_mode[j],
+					soc_info->rgltr_delay[j]);
+					if (rc) {
+						CAM_ERR(CAM_SENSOR,
+							"Reg enable failed");
+						goto power_up_failed;
+					}
+					power_setting->data[0] =
+						soc_info->rgltr[j];
+				}
+			}
+			if (power_setting->config_val)
+				soc_info->clk_rate[0][power_setting->seq_val] =
+					power_setting->config_val;
+
+			for (j = 0; j < soc_info->num_clk; j++) {
+				rc = cam_soc_util_clk_enable(soc_info->clk[j],
+					soc_info->clk_name[j],
+					soc_info->clk_rate[0][j]);
+				if (rc)
+					break;
+			}
+
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR, "clk enable failed");
+				goto power_up_failed;
+			}
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+			if (no_gpio) {
+				CAM_ERR(CAM_SENSOR, "request gpio failed");
+				return no_gpio;
+			}
+			if (!gpio_num_info) {
+				CAM_ERR(CAM_SENSOR, "Invalid gpio_num_info");
+				goto power_up_failed;
+			}
+			CAM_DBG(CAM_SENSOR, "gpio set val %d",
+				gpio_num_info->gpio_num
+				[power_setting->seq_type]);
+
+			rc = msm_cam_sensor_handle_reg_gpio(
+				power_setting->seq_type,
+				gpio_num_info,
+				(int) power_setting->config_val);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"Error in handling VREG GPIO");
+				goto power_up_failed;
+			}
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			if (power_setting->seq_val == INVALID_VREG)
+				break;
+
+			if (power_setting->seq_val >= CAM_VREG_MAX) {
+				CAM_ERR(CAM_SENSOR, "vreg index %d >= max %d",
+					power_setting->seq_val,
+					CAM_VREG_MAX);
+				goto power_up_failed;
+			}
+			if (power_setting->seq_val < num_vreg) {
+				CAM_DBG(CAM_SENSOR, "Enable Regulator");
+				vreg_idx = power_setting->seq_val;
+
+				soc_info->rgltr[vreg_idx] =
+					regulator_get(soc_info->dev,
+						soc_info->rgltr_name[vreg_idx]);
+				if (IS_ERR_OR_NULL(
+					soc_info->rgltr[vreg_idx])) {
+					rc = PTR_ERR(soc_info->rgltr[vreg_idx]);
+					rc = rc ? rc : -EINVAL;
+
+					CAM_ERR(CAM_SENSOR, "%s get failed %d",
+						soc_info->rgltr_name[vreg_idx],
+						rc);
+
+					soc_info->rgltr[vreg_idx] = NULL;
+					goto power_up_failed;
+				}
+
+				rc =  cam_soc_util_regulator_enable(
+					soc_info->rgltr[vreg_idx],
+					soc_info->rgltr_name[vreg_idx],
+					soc_info->rgltr_min_volt[vreg_idx],
+					soc_info->rgltr_max_volt[vreg_idx],
+					soc_info->rgltr_op_mode[vreg_idx],
+					soc_info->rgltr_delay[vreg_idx]);
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+						"Reg Enable failed for %s",
+						soc_info->rgltr_name[vreg_idx]);
+					goto power_up_failed;
+				}
+				power_setting->data[0] =
+						soc_info->rgltr[vreg_idx];
+			} else {
+				CAM_ERR(CAM_SENSOR, "usr_idx:%d dts_idx:%d",
+					power_setting->seq_val, num_vreg);
+			}
+
+			rc = msm_cam_sensor_handle_reg_gpio(
+				power_setting->seq_type,
+				gpio_num_info, 1);
+			if (rc < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"Error in handling VREG GPIO");
+				goto power_up_failed;
+			}
+			break;
+		default:
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				power_setting->seq_type);
+			break;
+		}
+		if (power_setting->delay > 20)
+			msleep(power_setting->delay);
+		else if (power_setting->delay)
+			usleep_range(power_setting->delay * 1000,
+				(power_setting->delay * 1000) + 1000);
+	}
+
+	ret = cam_res_mgr_shared_pinctrl_post_init();
+	if (ret)
+		CAM_ERR(CAM_SENSOR,
+			"Failed to post init shared pinctrl");
+
+	return 0;
+power_up_failed:
+	CAM_ERR(CAM_SENSOR, "power up failed, rewinding applied settings");
+	for (index--; index >= 0; index--) {
+		CAM_DBG(CAM_SENSOR, "index %d",  index);
+		power_setting = &ctrl->power_setting[index];
+		CAM_DBG(CAM_SENSOR, "type %d",
+			power_setting->seq_type);
+		switch (power_setting->seq_type) {
+		case SENSOR_MCLK:
+			for (i = soc_info->num_clk - 1; i >= 0; i--) {
+				cam_soc_util_clk_disable(soc_info->clk[i],
+					soc_info->clk_name[i]);
+			}
+			ret = cam_config_mclk_reg(ctrl, soc_info, index);
+			if (ret < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"config clk reg failed rc: %d", ret);
+				continue;
+			}
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+			if (!gpio_num_info)
+				continue;
+			if (!gpio_num_info->valid
+				[power_setting->seq_type])
+				continue;
+			cam_res_mgr_gpio_set_value(
+				gpio_num_info->gpio_num
+				[power_setting->seq_type], GPIOF_OUT_INIT_LOW);
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			if (power_setting->seq_val < num_vreg) {
+				CAM_DBG(CAM_SENSOR, "Disable Regulator");
+				vreg_idx = power_setting->seq_val;
+
+				rc =  cam_soc_util_regulator_disable(
+					soc_info->rgltr[vreg_idx],
+					soc_info->rgltr_name[vreg_idx],
+					soc_info->rgltr_min_volt[vreg_idx],
+					soc_info->rgltr_max_volt[vreg_idx],
+					soc_info->rgltr_op_mode[vreg_idx],
+					soc_info->rgltr_delay[vreg_idx]);
+
+				if (rc) {
+					CAM_ERR(CAM_SENSOR,
+					"Failed to disable reg: %s",
+					soc_info->rgltr_name[vreg_idx]);
+					soc_info->rgltr[vreg_idx] = NULL;
+					msm_cam_sensor_handle_reg_gpio(
+						power_setting->seq_type,
+						gpio_num_info,
+						GPIOF_OUT_INIT_LOW);
+					continue;
+				}
+				power_setting->data[0] =
+						soc_info->rgltr[vreg_idx];
+
+				regulator_put(soc_info->rgltr[vreg_idx]);
+				soc_info->rgltr[vreg_idx] = NULL;
+			} else {
+				CAM_ERR(CAM_SENSOR, "seq_val:%d > num_vreg: %d",
+					power_setting->seq_val, num_vreg);
+			}
+
+			msm_cam_sensor_handle_reg_gpio(power_setting->seq_type,
+				gpio_num_info, GPIOF_OUT_INIT_LOW);
+
+			break;
+		default:
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				power_setting->seq_type);
+			break;
+		}
+		if (power_setting->delay > 20) {
+			msleep(power_setting->delay);
+		} else if (power_setting->delay) {
+			usleep_range(power_setting->delay * 1000,
+				(power_setting->delay * 1000) + 1000);
+		}
+	}
+
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(
+			ctrl->pinctrl_info.pinctrl,
+			ctrl->pinctrl_info.gpio_state_suspend);
+		if (ret)
+			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
+		cam_res_mgr_shared_pinctrl_select_state(false);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		cam_res_mgr_shared_pinctrl_put();
+	}
+
+	if (soc_info->use_shared_clk)
+		cam_res_mgr_shared_clk_config(false);
+
+	ctrl->cam_pinctrl_status = 0;
+
+	cam_sensor_util_request_gpio_table(soc_info, 0);
+
+	return rc;
+}
+
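+/*
+ * Return the power up setting matching the given sequence type and
+ * value, or NULL if the power up table has no such entry.
+ */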
+static struct cam_sensor_power_setting*
+msm_camera_get_power_settings(struct cam_sensor_power_ctrl_t *ctrl,
+				enum msm_camera_power_seq_type seq_type,
+				uint16_t seq_val)
+{
+	struct cam_sensor_power_setting *power_setting, *ps = NULL;
+	int idx;
+
+	for (idx = 0; idx < ctrl->power_setting_size; idx++) {
+		power_setting = &ctrl->power_setting[idx];
+		if (power_setting->seq_type == seq_type &&
+			power_setting->seq_val ==  seq_val) {
+			ps = power_setting;
+			return ps;
+		}
+
+	}
+
+	return ps;
+}
+
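+/*
+ * Walk the power down sequence: disable clocks and the MCLK
+ * regulator, drive GPIOs to their power down values, release the
+ * remaining regulators, then restore the suspend pinctrl state and
+ * free the GPIO table.
+ */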
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info)
+{
+	int index = 0, ret = 0, num_vreg = 0, i;
+	struct cam_sensor_power_setting *pd = NULL;
+	struct cam_sensor_power_setting *ps = NULL;
+	struct msm_camera_gpio_num_info *gpio_num_info = NULL;
+
+	CAM_DBG(CAM_SENSOR, "Enter");
+	if (!ctrl || !soc_info) {
+		CAM_ERR(CAM_SENSOR, "failed: ctrl %pK soc_info %pK",
+			ctrl, soc_info);
+		return -EINVAL;
+	}
+
+	gpio_num_info = ctrl->gpio_num_info;
+	num_vreg = soc_info->num_rgltr;
+
+	if ((num_vreg <= 0) || (num_vreg > CAM_SOC_MAX_REGULATOR)) {
+		CAM_ERR(CAM_SENSOR, "failed: num_vreg %d", num_vreg);
+		return -EINVAL;
+	}
+
+	if (ctrl->power_down_setting_size > MAX_POWER_CONFIG) {
+		CAM_ERR(CAM_SENSOR, "Invalid: power down setting size %d",
+			ctrl->power_down_setting_size);
+		return -EINVAL;
+	}
+
+	for (index = 0; index < ctrl->power_down_setting_size; index++) {
+		CAM_DBG(CAM_SENSOR, "power_down_index %d",  index);
+		pd = &ctrl->power_down_setting[index];
+		if (!pd) {
+			CAM_ERR(CAM_SENSOR,
+				"Invalid power down settings for index %d",
+				index);
+			return -EINVAL;
+		}
+
+		ps = NULL;
+		CAM_DBG(CAM_SENSOR, "seq_type %d",  pd->seq_type);
+		switch (pd->seq_type) {
+		case SENSOR_MCLK:
+			for (i = soc_info->num_clk - 1; i >= 0; i--) {
+				cam_soc_util_clk_disable(soc_info->clk[i],
+					soc_info->clk_name[i]);
+			}
+
+			ret = cam_config_mclk_reg(ctrl, soc_info, index);
+			if (ret < 0) {
+				CAM_ERR(CAM_SENSOR,
+					"config clk reg failed rc: %d", ret);
+				continue;
+			}
+			break;
+		case SENSOR_RESET:
+		case SENSOR_STANDBY:
+		case SENSOR_CUSTOM_GPIO1:
+		case SENSOR_CUSTOM_GPIO2:
+
+			if (!gpio_num_info->valid[pd->seq_type])
+				continue;
+
+			cam_res_mgr_gpio_set_value(
+				gpio_num_info->gpio_num
+				[pd->seq_type],
+				(int) pd->config_val);
+
+			break;
+		case SENSOR_VANA:
+		case SENSOR_VDIG:
+		case SENSOR_VIO:
+		case SENSOR_VAF:
+		case SENSOR_VAF_PWDM:
+		case SENSOR_CUSTOM_REG1:
+		case SENSOR_CUSTOM_REG2:
+			if (pd->seq_val == INVALID_VREG)
+				break;
+
+			ps = msm_camera_get_power_settings(
+				ctrl, pd->seq_type,
+				pd->seq_val);
+			if (ps) {
+				if (pd->seq_val < num_vreg) {
+					CAM_DBG(CAM_SENSOR,
+						"Disable Regulator");
+					ret =  cam_soc_util_regulator_disable(
+					soc_info->rgltr[ps->seq_val],
+					soc_info->rgltr_name[ps->seq_val],
+					soc_info->rgltr_min_volt[ps->seq_val],
+					soc_info->rgltr_max_volt[ps->seq_val],
+					soc_info->rgltr_op_mode[ps->seq_val],
+					soc_info->rgltr_delay[ps->seq_val]);
+					if (ret) {
+						CAM_ERR(CAM_SENSOR,
+						"Reg: %s disable failed",
+						soc_info->rgltr_name[
+							ps->seq_val]);
+						soc_info->rgltr[ps->seq_val] =
+							NULL;
+						msm_cam_sensor_handle_reg_gpio(
+							pd->seq_type,
+							gpio_num_info,
+							GPIOF_OUT_INIT_LOW);
+						continue;
+					}
+					ps->data[0] =
+						soc_info->rgltr[ps->seq_val];
+					regulator_put(
+						soc_info->rgltr[ps->seq_val]);
+					soc_info->rgltr[ps->seq_val] = NULL;
+				} else {
+					CAM_ERR(CAM_SENSOR,
+						"seq_val:%d > num_vreg: %d",
+						 pd->seq_val,
+						num_vreg);
+				}
+			} else {
+				CAM_ERR(CAM_SENSOR,
+					"error in power up/down seq");
+			}
+
+			ret = msm_cam_sensor_handle_reg_gpio(pd->seq_type,
+				gpio_num_info, GPIOF_OUT_INIT_LOW);
+
+			if (ret < 0)
+				CAM_ERR(CAM_SENSOR,
+					"Error disabling VREG GPIO");
+			break;
+		default:
+			CAM_ERR(CAM_SENSOR, "error power seq type %d",
+				pd->seq_type);
+			break;
+		}
+		if (pd->delay > 20)
+			msleep(pd->delay);
+		else if (pd->delay)
+			usleep_range(pd->delay * 1000,
+				(pd->delay * 1000) + 1000);
+	}
+
+	if (ctrl->cam_pinctrl_status) {
+		ret = pinctrl_select_state(
+				ctrl->pinctrl_info.pinctrl,
+				ctrl->pinctrl_info.gpio_state_suspend);
+		if (ret)
+			CAM_ERR(CAM_SENSOR, "cannot set pin to suspend state");
+
+		cam_res_mgr_shared_pinctrl_select_state(false);
+		devm_pinctrl_put(ctrl->pinctrl_info.pinctrl);
+		cam_res_mgr_shared_pinctrl_put();
+	}
+
+	if (soc_info->use_shared_clk)
+		cam_res_mgr_shared_clk_config(false);
+
+	ctrl->cam_pinctrl_status = 0;
+
+	cam_sensor_util_request_gpio_table(soc_info, 0);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
new file mode 100644
index 0000000..ff1c232
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_sensor_utils/cam_sensor_util.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SENSOR_UTIL_H_
+#define _CAM_SENSOR_UTIL_H_
+
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/of.h>
+#include <cam_sensor_cmn_header.h>
+#include <cam_req_mgr_util.h>
+#include <cam_req_mgr_interface.h>
+#include <cam_mem_mgr.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+#include "cam_sensor_io.h"
+
+#define INVALID_VREG 100
+
+int cam_get_dt_power_setting_data(struct device_node *of_node,
+	struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_ctrl_t *power_info);
+
+int msm_camera_pinctrl_init
+	(struct msm_pinctrl_info *sensor_pctrl, struct device *dev);
+
+int cam_sensor_i2c_command_parser(struct camera_io_master *io_master,
+	struct i2c_settings_array *i2c_reg_settings,
+	struct cam_cmd_buf_desc *cmd_desc, int32_t num_cmd_buffers);
+
+int cam_sensor_util_i2c_apply_setting(struct camera_io_master *io_master_info,
+	struct i2c_settings_list *i2c_list);
+
+int32_t delete_request(struct i2c_settings_array *i2c_array);
+int cam_sensor_util_request_gpio_table(
+	struct cam_hw_soc_info *soc_info, int gpio_en);
+
+int cam_sensor_util_init_gpio_pin_tbl(
+	struct cam_hw_soc_info *soc_info,
+	struct msm_camera_gpio_num_info **pgpio_num_info);
+int cam_sensor_core_power_up(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info);
+
+int cam_sensor_util_power_down(struct cam_sensor_power_ctrl_t *ctrl,
+		struct cam_hw_soc_info *soc_info);
+
+int msm_camera_fill_vreg_params(struct cam_hw_soc_info *soc_info,
+	struct cam_sensor_power_setting *power_setting,
+	uint16_t power_setting_size);
+
+int32_t cam_sensor_update_power_settings(void *cmd_buf,
+	int cmd_length, struct cam_sensor_power_ctrl_t *power_info);
+
+int cam_sensor_bob_pwm_mode_switch(struct cam_hw_soc_info *soc_info,
+	int bob_reg_idx, bool flag);
+#endif /* _CAM_SENSOR_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_smmu/Makefile b/drivers/media/platform/msm/camera/cam_smmu/Makefile
new file mode 100644
index 0000000..7b82fd9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_smmu_api.o
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
new file mode 100644
index 0000000..3f4ef4e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -0,0 +1,3547 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-direction.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <linux/workqueue.h>
+#include <linux/genalloc.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+#include <uapi/media/cam_req_mgr.h>
+#include "cam_smmu_api.h"
+#include "cam_debug_util.h"
+
+#define SHARED_MEM_POOL_GRANULARITY 16
+
+#define IOMMU_INVALID_DIR -1
+#define BYTE_SIZE 8
+#define COOKIE_NUM_BYTE 2
+#define COOKIE_SIZE (BYTE_SIZE*COOKIE_NUM_BYTE)
+#define COOKIE_MASK ((1<<COOKIE_SIZE)-1)
+#define HANDLE_INIT (-1)
+#define CAM_SMMU_CB_MAX 5
+
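+/*
+ * An SMMU handle packs the context bank table index above a random
+ * 2-byte cookie, so a stale or forged handle can be detected by
+ * comparing it against the handle stored in the table.
+ */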
+#define GET_SMMU_HDL(x, y) (((x) << COOKIE_SIZE) | ((y) & COOKIE_MASK))
+#define GET_SMMU_TABLE_IDX(x) (((x) >> COOKIE_SIZE) & COOKIE_MASK)
+
+static int g_num_pf_handled = 4;
+module_param(g_num_pf_handled, int, 0644);
+
+struct firmware_alloc_info {
+	struct device *fw_dev;
+	void *fw_kva;
+	dma_addr_t fw_dma_hdl;
+};
+
+struct firmware_alloc_info icp_fw;
+
+struct cam_smmu_work_payload {
+	int idx;
+	struct iommu_domain *domain;
+	struct device *dev;
+	unsigned long iova;
+	int flags;
+	void *token;
+	struct list_head list;
+};
+
+enum cam_protection_type {
+	CAM_PROT_INVALID,
+	CAM_NON_SECURE,
+	CAM_SECURE,
+	CAM_PROT_MAX,
+};
+
+enum cam_iommu_type {
+	CAM_SMMU_INVALID,
+	CAM_QSMMU,
+	CAM_ARM_SMMU,
+	CAM_SMMU_MAX,
+};
+
+enum cam_smmu_buf_state {
+	CAM_SMMU_BUFF_EXIST,
+	CAM_SMMU_BUFF_NOT_EXIST,
+};
+
+enum cam_smmu_init_dir {
+	CAM_SMMU_TABLE_INIT,
+	CAM_SMMU_TABLE_DEINIT,
+};
+
+struct scratch_mapping {
+	void *bitmap;
+	size_t bits;
+	unsigned int order;
+	dma_addr_t base;
+};
+
+struct secheap_buf_info {
+	struct dma_buf *buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table;
+};
+
+struct cam_context_bank_info {
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	dma_addr_t va_start;
+	size_t va_len;
+	const char *name;
+	bool is_secure;
+	uint8_t scratch_buf_support;
+	uint8_t firmware_support;
+	uint8_t shared_support;
+	uint8_t io_support;
+	uint8_t secheap_support;
+	uint8_t qdss_support;
+	dma_addr_t qdss_phy_addr;
+	bool is_fw_allocated;
+	bool is_secheap_allocated;
+	bool is_qdss_allocated;
+
+	struct scratch_mapping scratch_map;
+	struct gen_pool *shared_mem_pool;
+
+	struct cam_smmu_region_info scratch_info;
+	struct cam_smmu_region_info firmware_info;
+	struct cam_smmu_region_info shared_info;
+	struct cam_smmu_region_info io_info;
+	struct cam_smmu_region_info secheap_info;
+	struct cam_smmu_region_info qdss_info;
+	struct secheap_buf_info secheap_buf;
+
+	struct list_head smmu_buf_list;
+	struct list_head smmu_buf_kernel_list;
+	struct mutex lock;
+	int handle;
+	enum cam_smmu_ops_param state;
+
+	cam_smmu_client_page_fault_handler handler[CAM_SMMU_CB_MAX];
+	void *token[CAM_SMMU_CB_MAX];
+	int cb_count;
+	int secure_count;
+	int pf_count;
+};
+
+struct cam_iommu_cb_set {
+	struct cam_context_bank_info *cb_info;
+	u32 cb_num;
+	u32 cb_init_count;
+	struct work_struct smmu_work;
+	struct mutex payload_list_lock;
+	struct list_head payload_list;
+	u32 non_fatal_fault;
+};
+
+static const struct of_device_id msm_cam_smmu_dt_match[] = {
+	{ .compatible = "qcom,msm-cam-smmu", },
+	{ .compatible = "qcom,msm-cam-smmu-cb", },
+	{ .compatible = "qcom,msm-cam-smmu-fw-dev", },
+	{}
+};
+
+struct cam_dma_buff_info {
+	struct dma_buf *buf;
+	struct dma_buf_attachment *attach;
+	struct sg_table *table;
+	enum dma_data_direction dir;
+	enum cam_smmu_region_id region_id;
+	int iommu_dir;
+	int ref_count;
+	dma_addr_t paddr;
+	struct list_head list;
+	int ion_fd;
+	size_t len;
+	size_t phys_len;
+};
+
+struct cam_sec_buff_info {
+	struct dma_buf *buf;
+	enum dma_data_direction dir;
+	int ref_count;
+	dma_addr_t paddr;
+	struct list_head list;
+	int ion_fd;
+	size_t len;
+};
+
+static const char *qdss_region_name = "qdss";
+
+static struct cam_iommu_cb_set iommu_cb_set;
+
+static enum dma_data_direction cam_smmu_translate_dir(
+	enum cam_smmu_map_dir dir);
+
+static int cam_smmu_check_handle_unique(int hdl);
+
+static int cam_smmu_create_iommu_handle(int idx);
+
+static int cam_smmu_create_add_handle_in_table(char *name,
+	int *hdl);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+	int ion_fd);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+	struct dma_buf *buf);
+
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+	int ion_fd);
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+	dma_addr_t base, size_t size,
+	int order);
+
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+	size_t size,
+	dma_addr_t *iova);
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+	dma_addr_t addr, size_t size);
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+	dma_addr_t virt_addr);
+
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id);
+
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+	struct dma_buf *buf, enum dma_data_direction dma_dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
+
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+	size_t virt_len,
+	size_t phys_len,
+	unsigned int iommu_dir,
+	dma_addr_t *virt_addr);
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+	struct cam_dma_buff_info *mapping_info, int idx);
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx);
+
+static void cam_smmu_clean_user_buffer_list(int idx);
+
+static void cam_smmu_clean_kernel_buffer_list(int idx);
+
+static void cam_smmu_print_user_list(int idx);
+
+static void cam_smmu_print_kernel_list(int idx);
+
+static void cam_smmu_print_table(void);
+
+static int cam_smmu_probe(struct platform_device *pdev);
+
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr);
+
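+/*
+ * Work handler for SMMU page faults: pop the oldest payload off the
+ * list, locate the mapping closest to the faulting IOVA, and invoke
+ * every client fault handler registered on that context bank.
+ */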
+static void cam_smmu_page_fault_work(struct work_struct *work)
+{
+	int j;
+	int idx;
+	struct cam_smmu_work_payload *payload;
+	uint32_t buf_info;
+
+	mutex_lock(&iommu_cb_set.payload_list_lock);
+	if (list_empty(&iommu_cb_set.payload_list)) {
+		CAM_ERR(CAM_SMMU, "Payload list empty");
+		mutex_unlock(&iommu_cb_set.payload_list_lock);
+		return;
+	}
+
+	payload = list_first_entry(&iommu_cb_set.payload_list,
+		struct cam_smmu_work_payload,
+		list);
+	list_del(&payload->list);
+	mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+	/* Dereference the payload to call the handler */
+	idx = payload->idx;
+	buf_info = cam_smmu_find_closest_mapping(idx, (void *)payload->iova);
+	if (buf_info != 0)
+		CAM_INFO(CAM_SMMU, "closest buf 0x%x idx %d", buf_info, idx);
+
+	for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+		if ((iommu_cb_set.cb_info[idx].handler[j])) {
+			iommu_cb_set.cb_info[idx].handler[j](
+				payload->domain,
+				payload->dev,
+				payload->iova,
+				payload->flags,
+				iommu_cb_set.cb_info[idx].token[j],
+				buf_info);
+		}
+	}
+	kfree(payload);
+}
+
+static void cam_smmu_print_user_list(int idx)
+{
+	struct cam_dma_buff_info *mapping;
+
+	CAM_ERR(CAM_SMMU, "index = %d", idx);
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		CAM_ERR(CAM_SMMU,
+			"ion_fd = %d, paddr= 0x%pK, len = %u, region = %d",
+			 mapping->ion_fd, (void *)mapping->paddr,
+			 (unsigned int)mapping->len,
+			 mapping->region_id);
+	}
+}
+
+static void cam_smmu_print_kernel_list(int idx)
+{
+	struct cam_dma_buff_info *mapping;
+
+	CAM_ERR(CAM_SMMU, "index = %d", idx);
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+		CAM_ERR(CAM_SMMU,
+			"dma_buf = %pK, paddr= 0x%pK, len = %u, region = %d",
+			 mapping->buf, (void *)mapping->paddr,
+			 (unsigned int)mapping->len,
+			 mapping->region_id);
+	}
+}
+
+static void cam_smmu_print_table(void)
+{
+	int i;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		CAM_ERR(CAM_SMMU, "i= %d, handle= %d, name_addr=%pK", i,
+			   (int)iommu_cb_set.cb_info[i].handle,
+			   (void *)iommu_cb_set.cb_info[i].name);
+		CAM_ERR(CAM_SMMU, "dev = %pK", iommu_cb_set.cb_info[i].dev);
+	}
+}
+
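+/*
+ * Given a faulting virtual address, return the buffer handle of the
+ * mapping containing it, or failing that of the mapping whose range
+ * lies closest, to help identify an out-of-bounds access.
+ */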
+static uint32_t cam_smmu_find_closest_mapping(int idx, void *vaddr)
+{
+	struct cam_dma_buff_info *mapping, *closest_mapping =  NULL;
+	unsigned long start_addr, end_addr, current_addr;
+	uint32_t buf_handle = 0;
+
+	long delta = 0, lowest_delta = 0;
+
+	current_addr = (unsigned long)vaddr;
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		start_addr = (unsigned long)mapping->paddr;
+		end_addr = (unsigned long)mapping->paddr + mapping->len;
+
+		if (start_addr <= current_addr && current_addr <= end_addr) {
+			closest_mapping = mapping;
+			CAM_INFO(CAM_SMMU,
+				"Found va 0x%lx in:0x%lx-0x%lx, fd %d cb:%s",
+				current_addr, start_addr,
+				end_addr, mapping->ion_fd,
+				iommu_cb_set.cb_info[idx].name);
+			goto end;
+		} else {
+			if (start_addr > current_addr)
+				delta =  start_addr - current_addr;
+			else
+				delta = current_addr - end_addr - 1;
+
+			if (delta < lowest_delta || lowest_delta == 0) {
+				lowest_delta = delta;
+				closest_mapping = mapping;
+			}
+			CAM_DBG(CAM_SMMU,
+				"approx va %lx not in range: %lx-%lx fd = %0x",
+				current_addr, start_addr,
+				end_addr, mapping->ion_fd);
+		}
+	}
+
+end:
+	if (closest_mapping) {
+		buf_handle = GET_MEM_HANDLE(idx, closest_mapping->ion_fd);
+		CAM_INFO(CAM_SMMU,
+			"Closest map fd %d 0x%lx 0x%lx-0x%lx buf=%pK mem %0x",
+			closest_mapping->ion_fd, current_addr,
+			(unsigned long)closest_mapping->paddr,
+			(unsigned long)closest_mapping->paddr +
+				closest_mapping->len,
+			closest_mapping->buf,
+			buf_handle);
+	} else
+		CAM_INFO(CAM_SMMU,
+			"Cannot find vaddr:%lx in SMMU %s virt address",
+			current_addr, iommu_cb_set.cb_info[idx].name);
+
+	return buf_handle;
+}
+
+void cam_smmu_set_client_page_fault_handler(int handle,
+	cam_smmu_client_page_fault_handler handler_cb, void *token)
+{
+	int idx, i = 0;
+
+	if (!token || (handle == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
+		return;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return;
+	}
+
+	if (handler_cb) {
+		if (iommu_cb_set.cb_info[idx].cb_count == CAM_SMMU_CB_MAX) {
+			CAM_ERR(CAM_SMMU,
+				"%s Should not register more handlers",
+				iommu_cb_set.cb_info[idx].name);
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return;
+		}
+
+		iommu_cb_set.cb_info[idx].cb_count++;
+
+		for (i = 0; i < iommu_cb_set.cb_info[idx].cb_count; i++) {
+			if (iommu_cb_set.cb_info[idx].token[i] == NULL) {
+				iommu_cb_set.cb_info[idx].token[i] = token;
+				iommu_cb_set.cb_info[idx].handler[i] =
+					handler_cb;
+				break;
+			}
+		}
+	} else {
+		for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+			if (iommu_cb_set.cb_info[idx].token[i] == token) {
+				iommu_cb_set.cb_info[idx].token[i] = NULL;
+				iommu_cb_set.cb_info[idx].handler[i] =
+					NULL;
+				iommu_cb_set.cb_info[idx].cb_count--;
+				break;
+			}
+		}
+		if (i == CAM_SMMU_CB_MAX)
+			CAM_ERR(CAM_SMMU,
+				"Error: hdl %x no matching tokens: %s",
+				handle, iommu_cb_set.cb_info[idx].name);
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token)
+{
+	int idx, i = 0;
+
+	if (!token || (handle == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: token is NULL or invalid handle");
+		return;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return;
+	}
+
+	for (i = 0; i < CAM_SMMU_CB_MAX; i++) {
+		if (iommu_cb_set.cb_info[idx].token[i] == token) {
+			iommu_cb_set.cb_info[idx].token[i] = NULL;
+			iommu_cb_set.cb_info[idx].handler[i] =
+				NULL;
+			iommu_cb_set.cb_info[idx].cb_count--;
+			break;
+		}
+	}
+	if (i == CAM_SMMU_CB_MAX)
+		CAM_ERR(CAM_SMMU, "Error: hdl %x no matching tokens: %s",
+			handle, iommu_cb_set.cb_info[idx].name);
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+}
+
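+/*
+ * IOMMU fault callback: resolve the context bank from the token (its
+ * name), stop after g_num_pf_handled faults per bank, then queue a
+ * payload and run the page fault work.
+ */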
+static int cam_smmu_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova,
+	int flags, void *token)
+{
+	char *cb_name;
+	int idx;
+	struct cam_smmu_work_payload *payload;
+
+	if (!token) {
+		CAM_ERR(CAM_SMMU, "Error: token is NULL");
+		CAM_ERR(CAM_SMMU, "Error: domain = %pK, device = %pK",
+			domain, dev);
+		CAM_ERR(CAM_SMMU, "iova = %lX, flags = %d", iova, flags);
+		return -EINVAL;
+	}
+
+	cb_name = (char *)token;
+	/* Check whether it is in the table */
+	for (idx = 0; idx < iommu_cb_set.cb_num; idx++) {
+		if (!strcmp(iommu_cb_set.cb_info[idx].name, cb_name))
+			break;
+	}
+
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: index is not valid, index = %d, token = %s",
+			idx, cb_name);
+		return -EINVAL;
+	}
+
+	if (++iommu_cb_set.cb_info[idx].pf_count > g_num_pf_handled) {
+		CAM_INFO_RATE_LIMIT(CAM_SMMU, "PF already handled %d %d %d",
+			g_num_pf_handled, idx,
+			iommu_cb_set.cb_info[idx].pf_count);
+		return -EINVAL;
+	}
+
+	payload = kzalloc(sizeof(struct cam_smmu_work_payload), GFP_ATOMIC);
+	if (!payload)
+		return -EINVAL;
+
+	payload->domain = domain;
+	payload->dev = dev;
+	payload->iova = iova;
+	payload->flags = flags;
+	payload->token = token;
+	payload->idx = idx;
+
+	mutex_lock(&iommu_cb_set.payload_list_lock);
+	list_add_tail(&payload->list, &iommu_cb_set.payload_list);
+	mutex_unlock(&iommu_cb_set.payload_list_lock);
+
+	cam_smmu_page_fault_work(&iommu_cb_set.smmu_work);
+
+	return -EINVAL;
+}
+
+static int cam_smmu_translate_dir_to_iommu_dir(
+	enum cam_smmu_map_dir dir)
+{
+	switch (dir) {
+	case CAM_SMMU_MAP_READ:
+		return IOMMU_READ;
+	case CAM_SMMU_MAP_WRITE:
+		return IOMMU_WRITE;
+	case CAM_SMMU_MAP_RW:
+		return IOMMU_READ|IOMMU_WRITE;
+	case CAM_SMMU_MAP_INVALID:
+	default:
+		CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d", dir);
+		break;
+	}
+	return IOMMU_INVALID_DIR;
+}
+
+static enum dma_data_direction cam_smmu_translate_dir(
+	enum cam_smmu_map_dir dir)
+{
+	switch (dir) {
+	case CAM_SMMU_MAP_READ:
+		return DMA_FROM_DEVICE;
+	case CAM_SMMU_MAP_WRITE:
+		return DMA_TO_DEVICE;
+	case CAM_SMMU_MAP_RW:
+		return DMA_BIDIRECTIONAL;
+	case CAM_SMMU_MAP_INVALID:
+	default:
+		CAM_ERR(CAM_SMMU, "Error: Direction is invalid. dir = %d",
+			(int)dir);
+		break;
+	}
+	return DMA_NONE;
+}
+
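+/*
+ * (Re)initialize every context bank slot. CAM_SMMU_TABLE_INIT also creates
+ * the per-bank mutexes; any other value destroys them, so this routine
+ * doubles as the deinit path.
+ */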
+void cam_smmu_reset_iommu_table(enum cam_smmu_init_dir ops)
+{
+	unsigned int i;
+	int j = 0;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		iommu_cb_set.cb_info[i].handle = HANDLE_INIT;
+		INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_list);
+		INIT_LIST_HEAD(&iommu_cb_set.cb_info[i].smmu_buf_kernel_list);
+		iommu_cb_set.cb_info[i].state = CAM_SMMU_DETACH;
+		iommu_cb_set.cb_info[i].dev = NULL;
+		iommu_cb_set.cb_info[i].cb_count = 0;
+		iommu_cb_set.cb_info[i].pf_count = 0;
+		for (j = 0; j < CAM_SMMU_CB_MAX; j++) {
+			iommu_cb_set.cb_info[i].token[j] = NULL;
+			iommu_cb_set.cb_info[i].handler[j] = NULL;
+		}
+		if (ops == CAM_SMMU_TABLE_INIT)
+			mutex_init(&iommu_cb_set.cb_info[i].lock);
+		else
+			mutex_destroy(&iommu_cb_set.cb_info[i].lock);
+	}
+}
+
+static int cam_smmu_check_handle_unique(int hdl)
+{
+	int i;
+
+	if (hdl == HANDLE_INIT) {
+		CAM_DBG(CAM_SMMU,
+			"iommu handle is init number. Need to try again");
+		return 1;
+	}
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		if (iommu_cb_set.cb_info[i].handle == HANDLE_INIT)
+			continue;
+
+		if (iommu_cb_set.cb_info[i].handle == hdl) {
+			CAM_DBG(CAM_SMMU, "iommu handle %d conflicts",
+				(int)hdl);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+/* use the low 2 bytes for the handle cookie */
+static int cam_smmu_create_iommu_handle(int idx)
+{
+	int rand, hdl = 0;
+
+	get_random_bytes(&rand, COOKIE_NUM_BYTE);
+	hdl = GET_SMMU_HDL(idx, rand);
+	CAM_DBG(CAM_SMMU, "create handle value = %x", (int)hdl);
+	return hdl;
+}
+
+static int cam_smmu_attach_device(int idx)
+{
+	int rc;
+	struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+	/* attach the mapping to device */
+	rc = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: ARM IOMMU attach failed. ret = %d",
+			rc);
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
+
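+/*
+ * Find the context bank by name and install a freshly generated random
+ * handle. A secure bank that already has a handle hands the same handle
+ * back and bumps secure_count; a non-secure bank treats a second request
+ * as an error.
+ */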
+static int cam_smmu_create_add_handle_in_table(char *name,
+	int *hdl)
+{
+	int i;
+	int handle;
+
+	/* create handle and add in the iommu hardware table */
+	for (i = 0; i < iommu_cb_set.cb_num; i++) {
+		if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
+			mutex_lock(&iommu_cb_set.cb_info[i].lock);
+			if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
+				CAM_ERR(CAM_SMMU,
+					"Error: %s already got handle 0x%x",
+					name,
+					iommu_cb_set.cb_info[i].handle);
+
+				if (iommu_cb_set.cb_info[i].is_secure)
+					iommu_cb_set.cb_info[i].secure_count++;
+
+				mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+				if (iommu_cb_set.cb_info[i].is_secure) {
+					*hdl = iommu_cb_set.cb_info[i].handle;
+					return 0;
+				}
+				return -EINVAL;
+			}
+
+			/* make sure handle is unique */
+			do {
+				handle = cam_smmu_create_iommu_handle(i);
+			} while (cam_smmu_check_handle_unique(handle));
+
+			/* put handle in the table */
+			iommu_cb_set.cb_info[i].handle = handle;
+			iommu_cb_set.cb_info[i].cb_count = 0;
+			if (iommu_cb_set.cb_info[i].is_secure)
+				iommu_cb_set.cb_info[i].secure_count++;
+			*hdl = handle;
+			CAM_DBG(CAM_SMMU, "%s creates handle 0x%x",
+				name, handle);
+			mutex_unlock(&iommu_cb_set.cb_info[i].lock);
+			return 0;
+		}
+	}
+
+	CAM_ERR(CAM_SMMU, "Error: Cannot find name %s or all handle exist",
+		name);
+	cam_smmu_print_table();
+	return -EINVAL;
+}
+
+static int cam_smmu_init_scratch_map(struct scratch_mapping *scratch_map,
+					dma_addr_t base, size_t size,
+					int order)
+{
+	unsigned int count = size >> (PAGE_SHIFT + order);
+	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
+	int err = 0;
+
+	if (!count) {
+		err = -EINVAL;
+		CAM_ERR(CAM_SMMU, "Page count is zero, size passed = %zu",
+			size);
+		goto bail;
+	}
+
+	scratch_map->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!scratch_map->bitmap) {
+		err = -ENOMEM;
+		goto bail;
+	}
+
+	scratch_map->base = base;
+	scratch_map->bits = BITS_PER_BYTE * bitmap_size;
+	scratch_map->order = order;
+
+bail:
+	return err;
+}
+
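+/*
+ * First-fit search over the scratch bitmap. One bitmap bit covers
+ * (1 << order) pages, and one extra guard page is reserved past each
+ * allocation; cam_smmu_free_scratch_va() below clears the same count.
+ */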
+static int cam_smmu_alloc_scratch_va(struct scratch_mapping *mapping,
+	size_t size,
+	dma_addr_t *iova)
+{
+	unsigned int order = get_order(size);
+	unsigned int align = 0;
+	unsigned int count, start;
+
+	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
+		 (1 << mapping->order) - 1) >> mapping->order;
+
+	/*
+	 * Transparently, add a guard page to the total count of pages
+	 * to be allocated
+	 */
+	count++;
+
+	if (order > mapping->order)
+		align = (1 << (order - mapping->order)) - 1;
+
+	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+					   count, align);
+
+	if (start > mapping->bits)
+		return -ENOMEM;
+
+	bitmap_set(mapping->bitmap, start, count);
+	*iova = mapping->base + (start << (mapping->order + PAGE_SHIFT));
+
+	return 0;
+}
+
+static int cam_smmu_free_scratch_va(struct scratch_mapping *mapping,
+	dma_addr_t addr, size_t size)
+{
+	unsigned int start = (addr - mapping->base) >>
+			     (mapping->order + PAGE_SHIFT);
+	unsigned int count = ((size >> PAGE_SHIFT) +
+			      (1 << mapping->order) - 1) >> mapping->order;
+
+	if (!addr) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid address");
+		return -EINVAL;
+	}
+
+	if (start + count > mapping->bits) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid page bits in scratch map");
+		return -EINVAL;
+	}
+
+	/*
+	 * Transparently, add a guard page to the total count of pages
+	 * to be freed
+	 */
+	count++;
+	bitmap_clear(mapping->bitmap, start, count);
+
+	return 0;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_virt_address(int idx,
+	dma_addr_t virt_addr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->paddr == virt_addr) {
+			CAM_DBG(CAM_SMMU, "Found virtual address %lx",
+				 (unsigned long)virt_addr);
+			return mapping;
+		}
+	}
+
+	CAM_ERR(CAM_SMMU, "Error: Cannot find virtual address %lx by index %d",
+		(unsigned long)virt_addr, idx);
+	return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_ion_index(int idx,
+	int ion_fd)
+{
+	struct cam_dma_buff_info *mapping;
+
+	if (ion_fd < 0) {
+		CAM_ERR(CAM_SMMU, "Invalid fd %d", ion_fd);
+		return NULL;
+	}
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->ion_fd == ion_fd) {
+			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+			return mapping;
+		}
+	}
+
+	CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
+
+	return NULL;
+}
+
+static struct cam_dma_buff_info *cam_smmu_find_mapping_by_dma_buf(int idx,
+	struct dma_buf *buf)
+{
+	struct cam_dma_buff_info *mapping;
+
+	if (!buf) {
+		CAM_ERR(CAM_SMMU, "Invalid dma_buf");
+		return NULL;
+	}
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list,
+			list) {
+		if (mapping->buf == buf) {
+			CAM_DBG(CAM_SMMU, "find dma_buf %pK", buf);
+			return mapping;
+		}
+	}
+
+	CAM_ERR(CAM_SMMU, "Error: Cannot find entry by index %d", idx);
+
+	return NULL;
+}
+
+static struct cam_sec_buff_info *cam_smmu_find_mapping_by_sec_buf_idx(int idx,
+	int ion_fd)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping, &iommu_cb_set.cb_info[idx].smmu_buf_list,
+		list) {
+		if (mapping->ion_fd == ion_fd) {
+			CAM_DBG(CAM_SMMU, "find ion_fd %d", ion_fd);
+			return mapping;
+		}
+	}
+	CAM_ERR(CAM_SMMU, "Error: Cannot find fd %d by index %d",
+		ion_fd, idx);
+	return NULL;
+}
+
+static void cam_smmu_clean_user_buffer_list(int idx)
+{
+	int ret;
+	struct cam_dma_buff_info *mapping_info, *temp;
+
+	list_for_each_entry_safe(mapping_info, temp,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		CAM_DBG(CAM_SMMU, "Free mapping address %pK, i = %d, fd = %d",
+			(void *)mapping_info->paddr, idx,
+			mapping_info->ion_fd);
+
+		if (mapping_info->ion_fd == 0xDEADBEEF)
+			/* Clean up scratch buffers */
+			ret = cam_smmu_free_scratch_buffer_remove_from_list(
+							mapping_info, idx);
+		else
+			/* Clean up regular mapped buffers */
+			ret = cam_smmu_unmap_buf_and_remove_from_list(
+					mapping_info,
+					idx);
+
+		if (ret < 0) {
+			CAM_ERR(CAM_SMMU, "Buffer delete failed: idx = %d",
+				idx);
+			CAM_ERR(CAM_SMMU,
+				"Buffer delete failed: addr = %lx, fd = %d",
+				(unsigned long)mapping_info->paddr,
+				mapping_info->ion_fd);
+			/*
+			 * Ignore this error and continue to delete other
+			 * buffers in the list
+			 */
+			continue;
+		}
+	}
+}
+
+static void cam_smmu_clean_kernel_buffer_list(int idx)
+{
+	int ret;
+	struct cam_dma_buff_info *mapping_info, *temp;
+
+	list_for_each_entry_safe(mapping_info, temp,
+			&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+		CAM_DBG(CAM_SMMU,
+			"Free mapping address %pK, i = %d, dma_buf = %pK",
+			(void *)mapping_info->paddr, idx,
+			mapping_info->buf);
+
+		/* Clean up regular mapped buffers */
+		ret = cam_smmu_unmap_buf_and_remove_from_list(
+				mapping_info,
+				idx);
+
+		if (ret < 0) {
+			CAM_ERR(CAM_SMMU,
+				"Buffer delete in kernel list failed: idx = %d",
+				idx);
+			CAM_ERR(CAM_SMMU,
+				"Buffer delete failed: addr = %lx, dma_buf = %pK",
+				(unsigned long)mapping_info->paddr,
+				mapping_info->buf);
+			/*
+			 * Ignore this error and continue to delete other
+			 * buffers in the list
+			 */
+			continue;
+		}
+	}
+}
+
+static int cam_smmu_attach(int idx)
+{
+	int ret;
+
+	if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+		ret = -EALREADY;
+	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+		ret = cam_smmu_attach_device(idx);
+		if (ret < 0) {
+			CAM_ERR(CAM_SMMU, "Error: ATTACH fail");
+			return -ENODEV;
+		}
+		iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
+		ret = 0;
+	} else {
+		CAM_ERR(CAM_SMMU, "Error: Not detach/attach: %d",
+			iommu_cb_set.cb_info[idx].state);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int cam_smmu_detach_device(int idx)
+{
+	int rc = 0;
+	struct cam_context_bank_info *cb = &iommu_cb_set.cb_info[idx];
+
+	/* detach the mapping to device if not already detached */
+	if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_DETACH) {
+		rc = -EALREADY;
+	} else if (iommu_cb_set.cb_info[idx].state == CAM_SMMU_ATTACH) {
+		arm_iommu_detach_device(cb->dev);
+		iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH;
+	}
+
+	return rc;
+}
+
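+/*
+ * Shared-region IOVAs are carved out of a per-context-bank gen_pool. This
+ * only reserves address space; the actual IOMMU mapping is done by the
+ * caller (see cam_smmu_map_buffer_validate()).
+ */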
+static int cam_smmu_alloc_iova(size_t size,
+	int32_t smmu_hdl, uint32_t *iova)
+{
+	int rc = 0;
+	int idx;
+	uint32_t vaddr = 0;
+
+	if (!iova || !size || (smmu_hdl == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_SMMU, "Allocating iova size = %zu for smmu hdl=%X",
+		size, smmu_hdl);
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].shared_support) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Shared memory not supported for hdl = %X",
+			smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	vaddr = gen_pool_alloc(iommu_cb_set.cb_info[idx].shared_mem_pool, size);
+	if (!vaddr)
+		return -ENOMEM;
+
+	*iova = vaddr;
+
+get_addr_end:
+	return rc;
+}
+
+static int cam_smmu_free_iova(uint32_t addr, size_t size,
+	int32_t smmu_hdl)
+{
+	int rc = 0;
+	int idx;
+
+	if (!size || (smmu_hdl == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != smmu_hdl) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, smmu_hdl);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	gen_pool_free(iommu_cb_set.cb_info[idx].shared_mem_pool, addr, size);
+
+get_addr_end:
+	return rc;
+}
+
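+/*
+ * Firmware memory lives in a fixed IOVA window taken from DT
+ * (firmware_info.iova_start/iova_len). Backing pages come from
+ * dma_alloc_coherent() on the dedicated icp_fw device and are mapped at
+ * the fixed IOVA with the privileged attribute (IOMMU_PRIV).
+ */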
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	uintptr_t *cpuva,
+	size_t *len)
+{
+	int rc;
+	int32_t idx;
+	size_t firmware_len = 0;
+	size_t firmware_start = 0;
+	struct iommu_domain *domain;
+
+	if (!iova || !len || !cpuva || (smmu_hdl == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].firmware_support) {
+		CAM_ERR(CAM_SMMU,
+			"Firmware memory not supported for this SMMU handle");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_fw_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to allocate twice");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	CAM_DBG(CAM_SMMU, "Firmware area len from DT = %zu", firmware_len);
+
+	icp_fw.fw_kva = dma_alloc_coherent(icp_fw.fw_dev,
+		firmware_len,
+		&icp_fw.fw_dma_hdl,
+		GFP_KERNEL);
+	if (!icp_fw.fw_kva) {
+		CAM_ERR(CAM_SMMU, "FW memory alloc failed");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	} else {
+		CAM_DBG(CAM_SMMU, "DMA alloc returned fw = %pK, hdl = %pK",
+			icp_fw.fw_kva, (void *)icp_fw.fw_dma_hdl);
+	}
+
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	rc = iommu_map(domain,
+		firmware_start,
+		icp_fw.fw_dma_hdl,
+		firmware_len,
+		IOMMU_READ|IOMMU_WRITE|IOMMU_PRIV);
+
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "Failed to map FW into IOMMU");
+		rc = -ENOMEM;
+		goto alloc_fail;
+	}
+	iommu_cb_set.cb_info[idx].is_fw_allocated = true;
+
+	*iova = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	*cpuva = (uintptr_t)icp_fw.fw_kva;
+	*len = firmware_len;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return rc;
+
+alloc_fail:
+	dma_free_coherent(icp_fw.fw_dev,
+		firmware_len,
+		icp_fw.fw_kva,
+		icp_fw.fw_dma_hdl);
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_alloc_firmware);
+
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl)
+{
+	int rc = 0;
+	int32_t idx;
+	size_t firmware_len = 0;
+	size_t firmware_start = 0;
+	struct iommu_domain *domain;
+	size_t unmapped = 0;
+
+	if (smmu_hdl == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].firmware_support) {
+		CAM_ERR(CAM_SMMU,
+			"Firmware memory not supported for this SMMU handle");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (!iommu_cb_set.cb_info[idx].is_fw_allocated) {
+		CAM_ERR(CAM_SMMU,
+			"Trying to deallocate firmware that is not allocated");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	firmware_len = iommu_cb_set.cb_info[idx].firmware_info.iova_len;
+	firmware_start = iommu_cb_set.cb_info[idx].firmware_info.iova_start;
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	unmapped = iommu_unmap(domain,
+		firmware_start,
+		firmware_len);
+
+	if (unmapped != firmware_len) {
+		CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu",
+			unmapped,
+			firmware_len);
+		rc = -EINVAL;
+	}
+
+	dma_free_coherent(icp_fw.fw_dev,
+		firmware_len,
+		icp_fw.fw_kva,
+		icp_fw.fw_dma_hdl);
+
+	icp_fw.fw_kva = 0;
+	icp_fw.fw_dma_hdl = 0;
+
+	iommu_cb_set.cb_info[idx].is_fw_allocated = false;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_dealloc_firmware);
+
+int cam_smmu_alloc_qdss(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	size_t *len)
+{
+	int rc;
+	int32_t idx;
+	size_t qdss_len = 0;
+	size_t qdss_start = 0;
+	dma_addr_t qdss_phy_addr;
+	struct iommu_domain *domain;
+
+	if (!iova || !len || (smmu_hdl == HANDLE_INIT)) {
+		CAM_ERR(CAM_SMMU, "Error: Input args are invalid");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].qdss_support) {
+		CAM_ERR(CAM_SMMU,
+			"QDSS memory not supported for this SMMU handle");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_qdss_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to allocate twice");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	qdss_len = iommu_cb_set.cb_info[idx].qdss_info.iova_len;
+	qdss_start = iommu_cb_set.cb_info[idx].qdss_info.iova_start;
+	qdss_phy_addr = iommu_cb_set.cb_info[idx].qdss_phy_addr;
+	CAM_DBG(CAM_SMMU, "QDSS area len from DT = %zu", qdss_len);
+
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	rc = iommu_map(domain,
+		qdss_start,
+		qdss_phy_addr,
+		qdss_len,
+		IOMMU_READ|IOMMU_WRITE);
+
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "Failed to map QDSS into IOMMU");
+		goto unlock_and_end;
+	}
+
+	iommu_cb_set.cb_info[idx].is_qdss_allocated = true;
+
+	*iova = iommu_cb_set.cb_info[idx].qdss_info.iova_start;
+	*len = qdss_len;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return rc;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_alloc_qdss);
+
+int cam_smmu_dealloc_qdss(int32_t smmu_hdl)
+{
+	int rc = 0;
+	int32_t idx;
+	size_t qdss_len = 0;
+	size_t qdss_start = 0;
+	struct iommu_domain *domain;
+	size_t unmapped = 0;
+
+	if (smmu_hdl == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].qdss_support) {
+		CAM_ERR(CAM_SMMU,
+			"QDSS memory not supported for this SMMU handle");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (!iommu_cb_set.cb_info[idx].is_qdss_allocated) {
+		CAM_ERR(CAM_SMMU,
+			"Trying to deallocate qdss that is not allocated");
+		rc = -ENOMEM;
+		goto unlock_and_end;
+	}
+
+	qdss_len = iommu_cb_set.cb_info[idx].qdss_info.iova_len;
+	qdss_start = iommu_cb_set.cb_info[idx].qdss_info.iova_start;
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+	unmapped = iommu_unmap(domain, qdss_start, qdss_len);
+
+	if (unmapped != qdss_len) {
+		CAM_ERR(CAM_SMMU, "Only %zu unmapped out of total %zu",
+			unmapped,
+			qdss_len);
+		rc = -EINVAL;
+	}
+
+	iommu_cb_set.cb_info[idx].is_qdss_allocated = false;
+
+unlock_and_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+end:
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_dealloc_qdss);
+
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+	enum cam_smmu_region_id region_id,
+	struct cam_smmu_region_info *region_info)
+{
+	int32_t idx;
+	struct cam_context_bank_info *cb = NULL;
+
+	if (!region_info) {
+		CAM_ERR(CAM_SMMU, "Invalid region_info pointer");
+		return -EINVAL;
+	}
+
+	if (smmu_hdl == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "Handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	cb = &iommu_cb_set.cb_info[idx];
+	if (!cb) {
+		CAM_ERR(CAM_SMMU, "SMMU context bank pointer invalid");
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	switch (region_id) {
+	case CAM_SMMU_REGION_FIRMWARE:
+		if (!cb->firmware_support) {
+			CAM_ERR(CAM_SMMU, "Firmware not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->firmware_info.iova_start;
+		region_info->iova_len = cb->firmware_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SHARED:
+		if (!cb->shared_support) {
+			CAM_ERR(CAM_SMMU, "Shared mem not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->shared_info.iova_start;
+		region_info->iova_len = cb->shared_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SCRATCH:
+		if (!cb->scratch_buf_support) {
+			CAM_ERR(CAM_SMMU, "Scratch memory not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->scratch_info.iova_start;
+		region_info->iova_len = cb->scratch_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_IO:
+		if (!cb->io_support) {
+			CAM_ERR(CAM_SMMU, "IO memory not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->io_info.iova_start;
+		region_info->iova_len = cb->io_info.iova_len;
+		break;
+	case CAM_SMMU_REGION_SECHEAP:
+		if (!cb->secheap_support) {
+			CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -ENODEV;
+		}
+		region_info->iova_start = cb->secheap_info.iova_start;
+		region_info->iova_len = cb->secheap_info.iova_len;
+		break;
+	default:
+		CAM_ERR(CAM_SMMU, "Invalid region id: %d for smmu hdl: %X",
+			smmu_hdl, region_id);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_get_region_info);
+
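+/*
+ * Pin a caller-supplied dma-buf into the secondary-heap IOVA window:
+ * attach, map the attachment to obtain an sg_table, then iommu_map_sg()
+ * it at secheap_info.iova_start. Only one reservation may be outstanding
+ * per context bank (is_secheap_allocated).
+ */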
+int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
+	struct dma_buf *buf,
+	dma_addr_t *iova,
+	size_t *request_len)
+{
+	struct secheap_buf_info *secheap_buf = NULL;
+	size_t size = 0;
+	uint32_t sec_heap_iova = 0;
+	size_t sec_heap_iova_len = 0;
+	int idx;
+	int rc = 0;
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].secheap_support) {
+		CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+
+	if (iommu_cb_set.cb_info[idx].is_secheap_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to allocate secheap twice");
+		rc = -ENOMEM;
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return rc;
+	}
+
+	if (IS_ERR_OR_NULL(buf)) {
+		rc = buf ? PTR_ERR(buf) : -EINVAL;
+		CAM_ERR(CAM_SMMU,
+			"Error: dma get buf failed. rc = %d", rc);
+		goto err_out;
+	}
+
+	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
+	secheap_buf->buf = buf;
+	secheap_buf->attach = dma_buf_attach(secheap_buf->buf,
+		iommu_cb_set.cb_info[idx].dev);
+	if (IS_ERR_OR_NULL(secheap_buf->attach)) {
+		rc = PTR_ERR(secheap_buf->attach);
+		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
+		goto err_put;
+	}
+
+	secheap_buf->table = dma_buf_map_attachment(secheap_buf->attach,
+		DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(secheap_buf->table)) {
+		rc = PTR_ERR(secheap_buf->table);
+		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
+		goto err_detach;
+	}
+
+	sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
+	sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
+	size = iommu_map_sg(iommu_cb_set.cb_info[idx].mapping->domain,
+		sec_heap_iova,
+		secheap_buf->table->sgl,
+		secheap_buf->table->nents,
+		IOMMU_READ | IOMMU_WRITE);
+	if (size != sec_heap_iova_len) {
+		CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
+		goto err_unmap_sg;
+	}
+
+	iommu_cb_set.cb_info[idx].is_secheap_allocated = true;
+	*iova = (uint32_t)sec_heap_iova;
+	*request_len = sec_heap_iova_len;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return rc;
+
+err_unmap_sg:
+	dma_buf_unmap_attachment(secheap_buf->attach,
+		secheap_buf->table,
+		DMA_BIDIRECTIONAL);
+err_detach:
+	dma_buf_detach(secheap_buf->buf,
+		secheap_buf->attach);
+err_put:
+	dma_buf_put(secheap_buf->buf);
+err_out:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_reserve_sec_heap);
+
+int cam_smmu_release_sec_heap(int32_t smmu_hdl)
+{
+	int idx;
+	size_t size = 0;
+	uint32_t sec_heap_iova = 0;
+	size_t sec_heap_iova_len = 0;
+	struct secheap_buf_info *secheap_buf = NULL;
+
+	idx = GET_SMMU_TABLE_IDX(smmu_hdl);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, smmu_hdl);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].secheap_support) {
+		CAM_ERR(CAM_SMMU, "Secondary heap not supported");
+		return -EINVAL;
+	}
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+
+	if (!iommu_cb_set.cb_info[idx].is_secheap_allocated) {
+		CAM_ERR(CAM_SMMU, "Trying to release secheap twice");
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -ENOMEM;
+	}
+
+	secheap_buf = &iommu_cb_set.cb_info[idx].secheap_buf;
+	sec_heap_iova = iommu_cb_set.cb_info[idx].secheap_info.iova_start;
+	sec_heap_iova_len = iommu_cb_set.cb_info[idx].secheap_info.iova_len;
+
+	size = iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+		sec_heap_iova,
+		sec_heap_iova_len);
+	if (size != sec_heap_iova_len) {
+		CAM_ERR(CAM_SMMU, "Failed: Unmapped = %zu, requested = %zu",
+			size,
+			sec_heap_iova_len);
+	}
+
+	dma_buf_unmap_attachment(secheap_buf->attach,
+		secheap_buf->table, DMA_BIDIRECTIONAL);
+	dma_buf_detach(secheap_buf->buf, secheap_buf->attach);
+	dma_buf_put(secheap_buf->buf);
+	iommu_cb_set.cb_info[idx].is_secheap_allocated = false;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_release_sec_heap);
+
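+/*
+ * Core dma-buf import helper. For CAM_SMMU_REGION_SHARED the IOVA comes
+ * from the gen_pool and the sg_table is mapped explicitly with
+ * iommu_map_sg(); for CAM_SMMU_REGION_IO the DMA API maps the buffer
+ * during dma_buf_map_attachment() and the device address is read back
+ * with sg_dma_address().
+ */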
+static int cam_smmu_map_buffer_validate(struct dma_buf *buf,
+	int idx, enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id,
+	struct cam_dma_buff_info **mapping_info)
+{
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct iommu_domain *domain;
+	size_t size = 0;
+	uint32_t iova = 0;
+	int rc = 0;
+
+	if (IS_ERR_OR_NULL(buf)) {
+		rc = buf ? PTR_ERR(buf) : -EINVAL;
+		CAM_ERR(CAM_SMMU,
+			"Error: dma get buf failed. rc = %d", rc);
+		goto err_out;
+	}
+
+	if (!mapping_info) {
+		rc = -EINVAL;
+		CAM_ERR(CAM_SMMU, "Error: mapping_info is invalid");
+		goto err_out;
+	}
+
+	attach = dma_buf_attach(buf, iommu_cb_set.cb_info[idx].dev);
+	if (IS_ERR_OR_NULL(attach)) {
+		rc = PTR_ERR(attach);
+		CAM_ERR(CAM_SMMU, "Error: dma buf attach failed");
+		goto err_put;
+	}
+
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		table = dma_buf_map_attachment(attach, dma_dir);
+		if (IS_ERR_OR_NULL(table)) {
+			rc = PTR_ERR(table);
+			CAM_ERR(CAM_SMMU, "Error: dma map attachment failed");
+			goto err_detach;
+		}
+
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+		if (!domain) {
+			CAM_ERR(CAM_SMMU, "CB has no domain set");
+			goto err_unmap_sg;
+		}
+
+		rc = cam_smmu_alloc_iova(*len_ptr,
+			iommu_cb_set.cb_info[idx].handle,
+			&iova);
+
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU,
+				"IOVA alloc failed for shared memory, size=%zu, idx=%d, handle=%d",
+				*len_ptr, idx,
+				iommu_cb_set.cb_info[idx].handle);
+			goto err_unmap_sg;
+		}
+
+		size = iommu_map_sg(domain, iova, table->sgl, table->nents,
+				IOMMU_READ | IOMMU_WRITE);
+
+		/*
+		 * iommu_map_sg() returns the number of bytes mapped and
+		 * 0 on failure; size is a size_t, so a negative check can
+		 * never fire.
+		 */
+		if (!size) {
+			CAM_ERR(CAM_SMMU, "IOMMU mapping failed");
+			rc = cam_smmu_free_iova(iova,
+				*len_ptr, iommu_cb_set.cb_info[idx].handle);
+			if (rc)
+				CAM_ERR(CAM_SMMU, "IOVA free failed");
+			rc = -ENOMEM;
+			goto err_unmap_sg;
+		} else {
+			CAM_DBG(CAM_SMMU,
+				"iommu_map_sg returned iova=0x%x, size=%zu",
+				iova, size);
+			*paddr_ptr = iova;
+			*len_ptr = size;
+		}
+	} else if (region_id == CAM_SMMU_REGION_IO) {
+		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
+
+		table = dma_buf_map_attachment(attach, dma_dir);
+		if (IS_ERR_OR_NULL(table)) {
+			rc = PTR_ERR(table);
+			CAM_ERR(CAM_SMMU, "Error: dma map attachment failed");
+			goto err_detach;
+		}
+
+		*paddr_ptr = sg_dma_address(table->sgl);
+		*len_ptr = (size_t)buf->size;
+	} else {
+		CAM_ERR(CAM_SMMU, "Error: Wrong region id passed");
+		rc = -EINVAL;
+		/* no attachment has been mapped for an unknown region */
+		goto err_detach;
+	}
+
+	CAM_DBG(CAM_SMMU, "iova=%pK, region_id=%d, paddr=%pK, len=%d",
+		iova, region_id, *paddr_ptr, *len_ptr);
+
+	if (table->sgl) {
+		CAM_DBG(CAM_SMMU,
+			"DMA buf: %pK, device: %pK, attach: %pK, table: %pK",
+			(void *)buf,
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)attach, (void *)table);
+		CAM_DBG(CAM_SMMU, "table sgl: %pK, rc: %d, dma_address: 0x%x",
+			(void *)table->sgl, rc,
+			(unsigned int)table->sgl->dma_address);
+	} else {
+		rc = -EINVAL;
+		CAM_ERR(CAM_SMMU, "Error: table sgl is null");
+		goto err_unmap_sg;
+	}
+
+	/* fill up mapping_info */
+	*mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!(*mapping_info)) {
+		rc = -ENOSPC;
+		goto err_alloc;
+	}
+
+	(*mapping_info)->buf = buf;
+	(*mapping_info)->attach = attach;
+	(*mapping_info)->table = table;
+	(*mapping_info)->paddr = *paddr_ptr;
+	(*mapping_info)->len = *len_ptr;
+	(*mapping_info)->dir = dma_dir;
+	(*mapping_info)->ref_count = 1;
+	(*mapping_info)->region_id = region_id;
+
+	if (!*paddr_ptr || !*len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Space Allocation failed");
+		kfree(*mapping_info);
+		*mapping_info = NULL;
+		rc = -ENOSPC;
+		goto err_alloc;
+	}
+	CAM_DBG(CAM_SMMU, "idx=%d, dma_buf=%pK, dev=%pK, paddr=%pK, len=%u",
+		idx, buf, (void *)iommu_cb_set.cb_info[idx].dev,
+		(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	return 0;
+
+err_alloc:
+	if (region_id == CAM_SMMU_REGION_SHARED) {
+		cam_smmu_free_iova(iova,
+			size,
+			iommu_cb_set.cb_info[idx].handle);
+
+		iommu_unmap(iommu_cb_set.cb_info[idx].mapping->domain,
+			*paddr_ptr,
+			*len_ptr);
+	}
+err_unmap_sg:
+	dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+	dma_buf_detach(buf, attach);
+err_put:
+	dma_buf_put(buf);
+err_out:
+	return rc;
+}
+
+
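+/*
+ * dma_buf_get() takes a file reference; it is dropped on the error paths
+ * inside cam_smmu_map_buffer_validate() or, for successful mappings, in
+ * cam_smmu_unmap_buf_and_remove_from_list().
+ */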
+static int cam_smmu_map_buffer_and_add_to_list(int idx, int ion_fd,
+	 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+	 size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info = NULL;
+	struct dma_buf *buf = NULL;
+
+	/* returns the dma_buf structure related to an fd */
+	buf = dma_buf_get(ion_fd);
+
+	rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
+		region_id, &mapping_info);
+
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "buffer validation failure");
+		return rc;
+	}
+
+	mapping_info->ion_fd = ion_fd;
+	/* add to the list */
+	list_add(&mapping_info->list,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	return 0;
+}
+
+static int cam_smmu_map_kernel_buffer_and_add_to_list(int idx,
+	struct dma_buf *buf, enum dma_data_direction dma_dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id)
+{
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info = NULL;
+
+	rc = cam_smmu_map_buffer_validate(buf, idx, dma_dir, paddr_ptr, len_ptr,
+		region_id, &mapping_info);
+
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "buffer validation failure");
+		return rc;
+	}
+
+	mapping_info->ion_fd = -1;
+
+	/* add to the list */
+	list_add(&mapping_info->list,
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list);
+
+	return 0;
+}
+
+
+static int cam_smmu_unmap_buf_and_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc;
+	size_t size;
+	struct iommu_domain *domain;
+
+	if ((!mapping_info->buf) || (!mapping_info->table) ||
+		(!mapping_info->attach)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params dev = %pK, table = %pK",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		CAM_ERR(CAM_SMMU, "Error:dma_buf = %pK, attach = %pK",
+			(void *)mapping_info->buf,
+			(void *)mapping_info->attach);
+		return -EINVAL;
+	}
+
+	if (mapping_info->region_id == CAM_SMMU_REGION_SHARED) {
+		CAM_DBG(CAM_SMMU,
+			"Removing SHARED buffer paddr = %pK, len = %zu",
+			(void *)mapping_info->paddr, mapping_info->len);
+
+		domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+		size = iommu_unmap(domain,
+			mapping_info->paddr,
+			mapping_info->len);
+
+		if (size != mapping_info->len) {
+			CAM_ERR(CAM_SMMU, "IOMMU unmap failed");
+			CAM_ERR(CAM_SMMU, "Unmapped = %zu, requested = %zu",
+				size,
+				mapping_info->len);
+		}
+
+		rc = cam_smmu_free_iova(mapping_info->paddr,
+			mapping_info->len,
+			iommu_cb_set.cb_info[idx].handle);
+
+		if (rc)
+			CAM_ERR(CAM_SMMU, "IOVA free failed");
+
+	} else if (mapping_info->region_id == CAM_SMMU_REGION_IO) {
+		mapping_info->attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
+	}
+
+	dma_buf_unmap_attachment(mapping_info->attach,
+		mapping_info->table, mapping_info->dir);
+	dma_buf_detach(mapping_info->buf, mapping_info->attach);
+	dma_buf_put(mapping_info->buf);
+
+	mapping_info->buf = NULL;
+
+	list_del_init(&mapping_info->list);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_fd_in_list(int idx,
+	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_list, list) {
+		if (mapping->ion_fd == ion_fd) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_dma_buf_in_list(int idx,
+	struct dma_buf *buf, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	struct cam_dma_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list, list) {
+		if (mapping->buf == buf) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+static enum cam_smmu_buf_state cam_smmu_check_secure_fd_in_list(int idx,
+					int ion_fd, dma_addr_t *paddr_ptr,
+					size_t *len_ptr)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->ion_fd == ion_fd) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			mapping->ref_count++;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+static enum cam_smmu_buf_state cam_smmu_validate_secure_fd_in_list(int idx,
+	int ion_fd, dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	struct cam_sec_buff_info *mapping;
+
+	list_for_each_entry(mapping,
+			&iommu_cb_set.cb_info[idx].smmu_buf_list,
+			list) {
+		if (mapping->ion_fd == ion_fd) {
+			*paddr_ptr = mapping->paddr;
+			*len_ptr = mapping->len;
+			return CAM_SMMU_BUFF_EXIST;
+		}
+	}
+
+	return CAM_SMMU_BUFF_NOT_EXIST;
+}
+
+int cam_smmu_get_handle(char *identifier, int *handle_ptr)
+{
+	int ret = 0;
+
+	if (!identifier) {
+		CAM_ERR(CAM_SMMU, "Error: iommu hardware name is NULL");
+		return -EINVAL;
+	}
+
+	if (!handle_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: handle pointer is NULL");
+		return -EINVAL;
+	}
+
+	/* create and put handle in the table */
+	ret = cam_smmu_create_add_handle_in_table(identifier, handle_ptr);
+	if (ret < 0)
+		CAM_ERR(CAM_SMMU, "Error: %s get handle fail", identifier);
+
+	return ret;
+}
+EXPORT_SYMBOL(cam_smmu_get_handle);
+
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param ops)
+{
+	int ret = 0, idx;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "Error: Index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	switch (ops) {
+	case CAM_SMMU_ATTACH: {
+		ret = cam_smmu_attach(idx);
+		break;
+	}
+	case CAM_SMMU_DETACH: {
+		ret = cam_smmu_detach_device(idx);
+		break;
+	}
+	case CAM_SMMU_VOTE:
+	case CAM_SMMU_DEVOTE:
+	default:
+		CAM_ERR(CAM_SMMU, "Error: idx = %d, ops = %d", idx, ops);
+		ret = -EINVAL;
+	}
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return ret;
+}
+EXPORT_SYMBOL(cam_smmu_ops);
+
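+/*
+ * Scratch buffers back a large virtual range with one physical allocation
+ * of phys_len bytes: every sg entry points at the same pages, so the
+ * device sees virt_len of contiguous IOVA while only phys_len of memory
+ * is pinned. The sentinel ion_fd 0xDEADBEEF marks these entries in the
+ * user buffer list.
+ */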
+static int cam_smmu_alloc_scratch_buffer_add_to_list(int idx,
+	size_t virt_len,
+	size_t phys_len,
+	unsigned int iommu_dir,
+	dma_addr_t *virt_addr)
+{
+	unsigned long nents = virt_len / phys_len;
+	struct cam_dma_buff_info *mapping_info = NULL;
+	size_t unmapped;
+	dma_addr_t iova = 0;
+	struct scatterlist *sg;
+	int i = 0;
+	int rc;
+	struct iommu_domain *domain = NULL;
+	struct page *page;
+	struct sg_table *table = NULL;
+
+	CAM_DBG(CAM_SMMU, "nents = %lu, idx = %d, virt_len  = %zx",
+		nents, idx, virt_len);
+	CAM_DBG(CAM_SMMU, "phys_len = %zx, iommu_dir = %d, virt_addr = %pK",
+		phys_len, iommu_dir, virt_addr);
+
+	/*
+	 * This table will go inside the 'mapping' structure
+	 * where it will be held until put_scratch_buffer is called
+	 */
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table) {
+		rc = -ENOMEM;
+		goto err_table_alloc;
+	}
+
+	rc = sg_alloc_table(table, nents, GFP_KERNEL);
+	if (rc < 0) {
+		rc = -EINVAL;
+		goto err_sg_alloc;
+	}
+
+	page = alloc_pages(GFP_KERNEL, get_order(phys_len));
+	if (!page) {
+		rc = -ENOMEM;
+		goto err_page_alloc;
+	}
+
+	/* Now we create the sg list */
+	for_each_sg(table->sgl, sg, table->nents, i)
+		sg_set_page(sg, page, phys_len, 0);
+
+
+	/* Get the domain from within our cb_set struct and map it*/
+	domain = iommu_cb_set.cb_info[idx].mapping->domain;
+
+	rc = cam_smmu_alloc_scratch_va(&iommu_cb_set.cb_info[idx].scratch_map,
+		virt_len, &iova);
+
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU,
+			"Could not find valid iova for scratch buffer");
+		goto err_iommu_map;
+	}
+
+	if (iommu_map_sg(domain,
+		iova,
+		table->sgl,
+		table->nents,
+		iommu_dir) != virt_len) {
+		CAM_ERR(CAM_SMMU, "iommu_map_sg() failed");
+		goto err_iommu_map;
+	}
+
+	/* Now update our mapping information within the cb_set struct */
+	mapping_info = kzalloc(sizeof(struct cam_dma_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		rc = -ENOMEM;
+		goto err_mapping_info;
+	}
+
+	mapping_info->ion_fd = 0xDEADBEEF;
+	mapping_info->buf = NULL;
+	mapping_info->attach = NULL;
+	mapping_info->table = table;
+	mapping_info->paddr = iova;
+	mapping_info->len = virt_len;
+	mapping_info->iommu_dir = iommu_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->phys_len = phys_len;
+	mapping_info->region_id = CAM_SMMU_REGION_SCRATCH;
+
+	CAM_DBG(CAM_SMMU, "paddr = %pK, len = %zx, phys_len = %zx",
+		(void *)mapping_info->paddr,
+		mapping_info->len, mapping_info->phys_len);
+
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	*virt_addr = (dma_addr_t)iova;
+
+	CAM_DBG(CAM_SMMU, "mapped virtual address = %lx",
+		(unsigned long)*virt_addr);
+	return 0;
+
+err_mapping_info:
+	unmapped = iommu_unmap(domain, iova, virt_len);
+	if (unmapped != virt_len)
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
+			unmapped, virt_len);
+err_iommu_map:
+	__free_pages(page, get_order(phys_len));
+err_page_alloc:
+	sg_free_table(table);
+err_sg_alloc:
+	kfree(table);
+err_table_alloc:
+	return rc;
+}
+
+static int cam_smmu_free_scratch_buffer_remove_from_list(
+	struct cam_dma_buff_info *mapping_info,
+	int idx)
+{
+	int rc = 0;
+	size_t unmapped;
+	struct iommu_domain *domain =
+		iommu_cb_set.cb_info[idx].mapping->domain;
+	struct scratch_mapping *scratch_map =
+		&iommu_cb_set.cb_info[idx].scratch_map;
+
+	if (!mapping_info->table) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params: dev = %pK, table = %pK",
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)mapping_info->table);
+		return -EINVAL;
+	}
+
+	/* Clean up the mapping_info struct from the list */
+	unmapped = iommu_unmap(domain, mapping_info->paddr, mapping_info->len);
+	if (unmapped != mapping_info->len)
+		CAM_ERR(CAM_SMMU, "Unmapped only %zx instead of %zx",
+			unmapped, mapping_info->len);
+
+	rc = cam_smmu_free_scratch_va(scratch_map,
+		mapping_info->paddr,
+		mapping_info->len);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid iova while freeing scratch buffer");
+		rc = -EINVAL;
+	}
+
+	__free_pages(sg_page(mapping_info->table->sgl),
+			get_order(mapping_info->phys_len));
+	sg_free_table(mapping_info->table);
+	kfree(mapping_info->table);
+	list_del_init(&mapping_info->list);
+
+	kfree(mapping_info);
+	mapping_info = NULL;
+
+	return rc;
+}
+
+int cam_smmu_get_scratch_iova(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr,
+	size_t virt_len,
+	size_t phys_len)
+{
+	int idx, rc;
+	unsigned int iommu_dir;
+
+	if (!paddr_ptr || !virt_len || !phys_len) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointer or lengths invalid");
+		return -EINVAL;
+	}
+
+	if (virt_len < phys_len) {
+		CAM_ERR(CAM_SMMU, "Error: virt_len > phys_len");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	iommu_dir = cam_smmu_translate_dir_to_iommu_dir(dir);
+	if (iommu_dir == IOMMU_INVALID_DIR) {
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch bufs");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	CAM_DBG(CAM_SMMU, "smmu handle = %x, idx = %d, dir = %d",
+		handle, idx, dir);
+	CAM_DBG(CAM_SMMU, "virt_len = %zx, phys_len  = %zx",
+		phys_len, virt_len);
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, PAGE_SIZE)) {
+		CAM_ERR(CAM_SMMU,
+			"Requested scratch buffer length not page aligned");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	if (!IS_ALIGNED(virt_len, phys_len)) {
+		CAM_ERR(CAM_SMMU,
+			"Requested virt length not aligned with phys length");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	rc = cam_smmu_alloc_scratch_buffer_add_to_list(idx,
+		virt_len,
+		phys_len,
+		iommu_dir,
+		paddr_ptr);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: mapping or add list fail");
+
+error:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
+int cam_smmu_put_scratch_iova(int handle,
+	dma_addr_t paddr)
+{
+	int idx;
+	int rc = -1;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].scratch_buf_support) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Context bank does not support scratch buffers");
+		rc = -EINVAL;
+		goto handle_err;
+	}
+
+	/* Based on virtual address and index, we can find mapping info
+	 * of the scratch buffer
+	 */
+	mapping_info = cam_smmu_find_mapping_by_virt_address(idx, paddr);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid params");
+		rc = -ENODEV;
+		goto handle_err;
+	}
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_free_scratch_buffer_remove_from_list(mapping_info, idx);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+		goto handle_err;
+	}
+
+handle_err:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+
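+/*
+ * Stage-2 (secure) buffers are not mapped through this driver's IOMMU
+ * domain: the address handed back is read straight from the sg_table with
+ * sg_phys(). DMA_ATTR_SKIP_CPU_SYNC is set, presumably because the CPU is
+ * never expected to touch secure memory.
+ */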
+static int cam_smmu_map_stage2_buffer_and_add_to_list(int idx, int ion_fd,
+		 enum dma_data_direction dma_dir, dma_addr_t *paddr_ptr,
+		 size_t *len_ptr)
+{
+	int rc = 0;
+	struct dma_buf *dmabuf = NULL;
+	struct dma_buf_attachment *attach = NULL;
+	struct sg_table *table = NULL;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	dmabuf = dma_buf_get(ion_fd);
+	if (IS_ERR_OR_NULL((void *)(dmabuf))) {
+		CAM_ERR(CAM_SMMU,
+			"Error: dma buf get failed, idx=%d, ion_fd=%d",
+			idx, ion_fd);
+		rc = PTR_ERR(dmabuf);
+		goto err_out;
+	}
+
+	/*
+	 * ion_phys() is deprecated. call dma_buf_attach() and
+	 * dma_buf_map_attachment() to get the buffer's physical
+	 * address.
+	 */
+	attach = dma_buf_attach(dmabuf, iommu_cb_set.cb_info[idx].dev);
+	if (IS_ERR_OR_NULL(attach)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: dma buf attach failed, idx=%d, ion_fd=%d",
+			idx, ion_fd);
+		rc = PTR_ERR(attach);
+		goto err_put;
+	}
+
+	attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+	table = dma_buf_map_attachment(attach, dma_dir);
+	if (IS_ERR_OR_NULL(table)) {
+		CAM_ERR(CAM_SMMU, "Error: dma buf map attachment failed");
+		rc = PTR_ERR(table);
+		goto err_detach;
+	}
+
+	/* return addr and len to client */
+	*paddr_ptr = sg_phys(table->sgl);
+	*len_ptr = (size_t)sg_dma_len(table->sgl);
+
+	/* fill up mapping_info */
+	mapping_info = kzalloc(sizeof(struct cam_sec_buff_info), GFP_KERNEL);
+	if (!mapping_info) {
+		rc = -ENOMEM;
+		goto err_unmap_sg;
+	}
+
+	mapping_info->ion_fd = ion_fd;
+	mapping_info->paddr = *paddr_ptr;
+	mapping_info->len = *len_ptr;
+	mapping_info->dir = dma_dir;
+	mapping_info->ref_count = 1;
+	mapping_info->buf = dmabuf;
+
+	CAM_DBG(CAM_SMMU, "idx=%d, ion_fd=%d, dev=%pK, paddr=%pK, len=%u",
+			idx, ion_fd,
+			(void *)iommu_cb_set.cb_info[idx].dev,
+			(void *)*paddr_ptr, (unsigned int)*len_ptr);
+
+	/* add to the list */
+	list_add(&mapping_info->list, &iommu_cb_set.cb_info[idx].smmu_buf_list);
+
+	return 0;
+
+err_unmap_sg:
+	dma_buf_unmap_attachment(attach, table, dma_dir);
+err_detach:
+	dma_buf_detach(dmabuf, attach);
+err_put:
+	dma_buf_put(dmabuf);
+err_out:
+	return rc;
+}
+
+int cam_smmu_map_stage2_iova(int handle,
+		int ion_fd, enum cam_smmu_map_dir dir,
+		dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	int idx, rc;
+	enum dma_data_direction dma_dir;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid inputs, paddr_ptr:%pK, len_ptr: %pK",
+			paddr_ptr, len_ptr);
+		return -EINVAL;
+	}
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	if (dma_dir == DMA_NONE) {
+		CAM_ERR(CAM_SMMU,
+			"Error: translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map secure mem to non secure cb, idx=%d",
+			idx);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, idx=%d, table_hdl=%x, hdl=%x",
+			idx, iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_secure_fd_in_list(idx, ion_fd, paddr_ptr,
+			len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_DBG(CAM_SMMU,
+			"fd:%d already in list idx:%d, handle=%d give same addr back",
+			ion_fd, idx, handle);
+		rc = 0;
+		goto get_addr_end;
+	}
+	rc = cam_smmu_map_stage2_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+			paddr_ptr, len_ptr);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU,
+			"Error: mapping or add list fail, idx=%d, handle=%d, fd=%d, rc=%d",
+			idx, handle, ion_fd, rc);
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_stage2_iova);
+
+static int cam_smmu_secure_unmap_buf_and_remove_from_list(
+		struct cam_sec_buff_info *mapping_info,
+		int idx)
+{
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: List doesn't exist");
+		return -EINVAL;
+	}
+	dma_buf_put(mapping_info->buf);
+	list_del_init(&mapping_info->list);
+
+	CAM_DBG(CAM_SMMU, "unmap fd: %d, idx : %d", mapping_info->ion_fd, idx);
+
+	/* free one buffer */
+	kfree(mapping_info);
+	return 0;
+}
+
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd)
+{
+	int idx, rc;
+	struct cam_sec_buff_info *mapping_info;
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if ((handle == HANDLE_INIT) ||
+		(idx < 0) ||
+		(idx >= iommu_cb_set.cb_num)) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_sec_buf_idx(idx, ion_fd);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params! idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	mapping_info->ref_count--;
+	if (mapping_info->ref_count > 0) {
+		CAM_DBG(CAM_SMMU,
+			"idx: %d fd = %d ref_count: %d",
+			idx, ion_fd, mapping_info->ref_count);
+		rc = 0;
+		goto put_addr_end;
+	}
+	mapping_info->ref_count = 0;
+
+	/* unmapping one buffer from device */
+	rc = cam_smmu_secure_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+		goto put_addr_end;
+	}
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_stage2_iova);
+
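+/*
+ * Common argument checks for the user and kernel map paths. Note the
+ * SHARED-region asymmetry: for CAM_SMMU_REGION_SHARED *len_ptr is an input
+ * (the requested size) and is left untouched, while for other regions it
+ * is cleared here and filled in as an output by the mapping code.
+ */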
+static int cam_smmu_map_iova_validate_params(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id)
+{
+	int idx, rc = 0;
+	enum dma_data_direction dma_dir;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	if (region_id != CAM_SMMU_REGION_SHARED)
+		*len_ptr = (size_t)0;
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	if (dma_dir == DMA_NONE) {
+		CAM_ERR(CAM_SMMU, "translate direction failed. dir = %d", dir);
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_smmu_map_user_iova(int handle, int ion_fd,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+	enum dma_data_direction dma_dir;
+
+	rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+		len_ptr, region_id);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+		return rc;
+	}
+
+	/*
+	 * Use the translate helper (as the kernel-buffer path does) rather
+	 * than a raw enum cast between cam_smmu_map_dir and
+	 * dma_data_direction.
+	 */
+	dma_dir = cam_smmu_translate_dir(dir);
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map non-secure mem to secure cb idx=%d",
+			idx);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"hdl is not valid, idx=%d, table_hdl = %x, hdl = %x",
+			idx, iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_ERR(CAM_SMMU,
+			"fd:%d already in list idx:%d, handle=%d, give same addr back",
+			ion_fd, idx, handle);
+		rc = -EALREADY;
+		goto get_addr_end;
+	}
+
+	rc = cam_smmu_map_buffer_and_add_to_list(idx, ion_fd, dma_dir,
+			paddr_ptr, len_ptr, region_id);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU,
+			"mapping or add list fail, idx=%d, fd=%d, region=%d, rc=%d",
+			idx, ion_fd, region_id, rc);
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_user_iova);
+
+int cam_smmu_map_kernel_iova(int handle, struct dma_buf *buf,
+	enum cam_smmu_map_dir dir, dma_addr_t *paddr_ptr,
+	size_t *len_ptr, enum cam_smmu_region_id region_id)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+	enum dma_data_direction dma_dir;
+
+	rc = cam_smmu_map_iova_validate_params(handle, dir, paddr_ptr,
+		len_ptr, region_id);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "initial checks failed, unable to proceed");
+		return rc;
+	}
+
+	dma_dir = cam_smmu_translate_dir(dir);
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't map non-secure mem to secure cb");
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU, "hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].state != CAM_SMMU_ATTACH) {
+		CAM_ERR(CAM_SMMU,
+			"Err:Dev %s should call SMMU attach before map buffer",
+			iommu_cb_set.cb_info[idx].name);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_dma_buf_in_list(idx, buf,
+		paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_EXIST) {
+		CAM_ERR(CAM_SMMU,
+			"dma_buf: %pK already in the list", buf);
+		rc = -EALREADY;
+		goto get_addr_end;
+	}
+
+	rc = cam_smmu_map_kernel_buffer_and_add_to_list(idx, buf, dma_dir,
+			paddr_ptr, len_ptr, region_id);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "mapping or add list fail");
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_map_kernel_iova);
+
+int cam_smmu_get_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get non-secure mem from secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_check_fd_in_list(idx, ion_fd, paddr_ptr, len_ptr);
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_iova);
+
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr)
+{
+	int idx, rc = 0;
+	enum cam_smmu_buf_state buf_state;
+
+	if (!paddr_ptr || !len_ptr) {
+		CAM_ERR(CAM_SMMU, "Error: Input pointers are invalid");
+		return -EINVAL;
+	}
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* clean the content from clients */
+	*paddr_ptr = (dma_addr_t)NULL;
+	*len_ptr = (size_t)0;
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	if (!iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't get secure mem from non secure cb");
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+	buf_state = cam_smmu_validate_secure_fd_in_list(idx,
+		ion_fd,
+		paddr_ptr,
+		len_ptr);
+
+	if (buf_state == CAM_SMMU_BUFF_NOT_EXIST) {
+		CAM_ERR(CAM_SMMU, "ion_fd:%d not in the mapped list", ion_fd);
+		rc = -EINVAL;
+		goto get_addr_end;
+	}
+
+get_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_get_stage2_iova);
+
+static int cam_smmu_unmap_validate_params(int handle)
+{
+	int idx;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_smmu_unmap_user_iova(int handle,
+	int ion_fd, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	rc = cam_smmu_unmap_validate_params(handle);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "unmap util validation failure");
+		return rc;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap non-secure mem from secure cb");
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Based on ion_fd & index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Unmapping one buffer from device */
+	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
+	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+
+unmap_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_user_iova);
+
+int cam_smmu_unmap_kernel_iova(int handle,
+	struct dma_buf *buf, enum cam_smmu_region_id region_id)
+{
+	int idx, rc;
+	struct cam_dma_buff_info *mapping_info;
+
+	rc = cam_smmu_unmap_validate_params(handle);
+	if (rc) {
+		CAM_ERR(CAM_SMMU, "unmap util validation failure");
+		return rc;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		CAM_ERR(CAM_SMMU,
+			"Error: can't unmap non-secure mem from secure cb");
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Based on dma_buf & index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_dma_buf(idx, buf);
+
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU,
+			"Error: Invalid params idx = %d, dma_buf = %pK",
+			idx, buf);
+		rc = -EINVAL;
+		goto unmap_end;
+	}
+
+	/* Unmapping one buffer from device */
+	CAM_DBG(CAM_SMMU, "SMMU: removing buffer idx = %d", idx);
+	rc = cam_smmu_unmap_buf_and_remove_from_list(mapping_info, idx);
+	if (rc < 0)
+		CAM_ERR(CAM_SMMU, "Error: unmap or remove list fail");
+
+unmap_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_unmap_kernel_iova);
+
+int cam_smmu_put_iova(int handle, int ion_fd)
+{
+	int idx;
+	int rc = 0;
+	struct cam_dma_buff_info *mapping_info;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	/* find index in the iommu_cb_set.cb_info */
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+	/* based on ion fd and index, we can find mapping info of buffer */
+	mapping_info = cam_smmu_find_mapping_by_ion_index(idx, ion_fd);
+	if (!mapping_info) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid params idx = %d, fd = %d",
+			idx, ion_fd);
+		rc = -EINVAL;
+		goto put_addr_end;
+	}
+
+put_addr_end:
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return rc;
+}
+EXPORT_SYMBOL(cam_smmu_put_iova);
+
+int cam_smmu_destroy_handle(int handle)
+{
+	int idx;
+
+	if (handle == HANDLE_INIT) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid handle");
+		return -EINVAL;
+	}
+
+	idx = GET_SMMU_TABLE_IDX(handle);
+	if (idx < 0 || idx >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU,
+			"Error: handle or index invalid. idx = %d hdl = %x",
+			idx, handle);
+		return -EINVAL;
+	}
+
+	mutex_lock(&iommu_cb_set.cb_info[idx].lock);
+	if (iommu_cb_set.cb_info[idx].handle != handle) {
+		CAM_ERR(CAM_SMMU,
+			"Error: hdl is not valid, table_hdl = %x, hdl = %x",
+			iommu_cb_set.cb_info[idx].handle, handle);
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return -EINVAL;
+	}
+
+	if (!list_empty_careful(&iommu_cb_set.cb_info[idx].smmu_buf_list)) {
+		CAM_ERR(CAM_SMMU, "UMD %s buffer list is not clean",
+			iommu_cb_set.cb_info[idx].name);
+		cam_smmu_print_user_list(idx);
+		cam_smmu_clean_user_buffer_list(idx);
+	}
+
+	if (!list_empty_careful(
+		&iommu_cb_set.cb_info[idx].smmu_buf_kernel_list)) {
+		CAM_ERR(CAM_SMMU, "KMD %s buffer list is not clean",
+			iommu_cb_set.cb_info[idx].name);
+		cam_smmu_print_kernel_list(idx);
+		cam_smmu_clean_kernel_buffer_list(idx);
+	}
+
+	if (iommu_cb_set.cb_info[idx].is_secure) {
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+			return -EPERM;
+		}
+
+		iommu_cb_set.cb_info[idx].secure_count--;
+		if (iommu_cb_set.cb_info[idx].secure_count == 0) {
+			iommu_cb_set.cb_info[idx].cb_count = 0;
+			iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+		}
+
+		mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+		return 0;
+	}
+
+	iommu_cb_set.cb_info[idx].cb_count = 0;
+	iommu_cb_set.cb_info[idx].handle = HANDLE_INIT;
+	mutex_unlock(&iommu_cb_set.cb_info[idx].lock);
+	return 0;
+}
+EXPORT_SYMBOL(cam_smmu_destroy_handle);
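+
+/*
+ * Hedged lifecycle sketch tying the public entry points together; "ife"
+ * is a hypothetical identifier that would have to exist in the device
+ * tree for cam_smmu_get_handle() to succeed.
+ *
+ *	int hdl;
+ *
+ *	if (!cam_smmu_get_handle("ife", &hdl)) {
+ *		cam_smmu_ops(hdl, CAM_SMMU_ATTACH);
+ *		... map and use buffers ...
+ *		cam_smmu_ops(hdl, CAM_SMMU_DETACH);
+ *		cam_smmu_destroy_handle(hdl);
+ *	}
+ */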
+
+static void cam_smmu_deinit_cb(struct cam_context_bank_info *cb)
+{
+	arm_iommu_detach_device(cb->dev);
+
+	if (cb->io_support && cb->mapping) {
+		arm_iommu_release_mapping(cb->mapping);
+		cb->mapping = NULL;
+	}
+
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+}
+
+static void cam_smmu_release_cb(struct platform_device *pdev)
+{
+	int i = 0;
+
+	for (i = 0; i < iommu_cb_set.cb_num; i++)
+		cam_smmu_deinit_cb(&iommu_cb_set.cb_info[i]);
+
+	devm_kfree(&pdev->dev, iommu_cb_set.cb_info);
+	iommu_cb_set.cb_num = 0;
+}
+
+static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
+	struct device *dev)
+{
+	int rc = 0;
+
+	if (!cb || !dev) {
+		CAM_ERR(CAM_SMMU, "Error: invalid input params");
+		return -EINVAL;
+	}
+
+	cb->dev = dev;
+	cb->is_fw_allocated = false;
+	cb->is_secheap_allocated = false;
+
+	/* Create a pool with 64K granularity for supporting shared memory */
+	if (cb->shared_support) {
+		cb->shared_mem_pool = gen_pool_create(
+			SHARED_MEM_POOL_GRANULARITY, -1);
+
+		if (!cb->shared_mem_pool)
+			return -ENOMEM;
+
+		rc = gen_pool_add(cb->shared_mem_pool,
+			cb->shared_info.iova_start,
+			cb->shared_info.iova_len,
+			-1);
+
+		CAM_DBG(CAM_SMMU, "Shared mem start->%lX",
+			(unsigned long)cb->shared_info.iova_start);
+		CAM_DBG(CAM_SMMU, "Shared mem len->%zu",
+			cb->shared_info.iova_len);
+
+		if (rc) {
+			CAM_ERR(CAM_SMMU, "Genpool chunk creation failed");
+			gen_pool_destroy(cb->shared_mem_pool);
+			cb->shared_mem_pool = NULL;
+			return rc;
+		}
+	}
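+
+	/*
+	 * A hedged sketch of how the pool above is meant to be consumed
+	 * elsewhere in the driver; gen_pool_alloc()/gen_pool_free() are the
+	 * standard genalloc calls and "map_len" is a hypothetical request
+	 * size.
+	 *
+	 *	unsigned long iova;
+	 *
+	 *	iova = gen_pool_alloc(cb->shared_mem_pool, map_len);
+	 *	if (!iova)
+	 *		return -ENOMEM;
+	 *	...
+	 *	gen_pool_free(cb->shared_mem_pool, iova, map_len);
+	 */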
+
+	if (cb->scratch_buf_support) {
+		rc = cam_smmu_init_scratch_map(&cb->scratch_map,
+			cb->scratch_info.iova_start,
+			cb->scratch_info.iova_len,
+			0);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU,
+				"Error: failed to create scratch map");
+			rc = -ENODEV;
+			goto end;
+		}
+	}
+
+	/* create a virtual mapping */
+	if (cb->io_support) {
+		cb->mapping = arm_iommu_create_mapping(&platform_bus_type,
+			cb->io_info.iova_start, cb->io_info.iova_len);
+		if (IS_ERR(cb->mapping)) {
+			CAM_ERR(CAM_SMMU, "Error: create mapping Failed");
+			rc = -ENODEV;
+			goto end;
+		}
+
+		iommu_cb_set.non_fatal_fault = 1;
+		if (iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_NON_FATAL_FAULTS,
+			&iommu_cb_set.non_fatal_fault) < 0) {
+			CAM_ERR(CAM_SMMU,
+				"Error: failed to set non-fatal fault attribute");
+		}
+
+	} else {
+		CAM_ERR(CAM_SMMU, "Context bank does not have IO region");
+		rc = -ENODEV;
+		goto end;
+	}
+
+	return rc;
+end:
+	if (cb->shared_support) {
+		gen_pool_destroy(cb->shared_mem_pool);
+		cb->shared_mem_pool = NULL;
+	}
+
+	if (cb->scratch_buf_support) {
+		kfree(cb->scratch_map.bitmap);
+		cb->scratch_map.bitmap = NULL;
+	}
+
+	return rc;
+}
+
+static int cam_alloc_smmu_context_banks(struct device *dev)
+{
+	struct device_node *domains_child_node = NULL;
+
+	if (!dev) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
+		return -ENODEV;
+	}
+
+	iommu_cb_set.cb_num = 0;
+
+	/* traverse through all the child nodes and increment the cb count */
+	for_each_available_child_of_node(dev->of_node, domains_child_node) {
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,msm-cam-smmu-cb"))
+			iommu_cb_set.cb_num++;
+
+		if (of_device_is_compatible(domains_child_node,
+			"qcom,qsmmu-cam-cb"))
+			iommu_cb_set.cb_num++;
+	}
+
+	if (iommu_cb_set.cb_num == 0) {
+		CAM_ERR(CAM_SMMU, "Error: no context banks present");
+		return -ENOENT;
+	}
+
+	/* allocate memory for the context banks */
+	iommu_cb_set.cb_info = devm_kzalloc(dev,
+		iommu_cb_set.cb_num * sizeof(struct cam_context_bank_info),
+		GFP_KERNEL);
+
+	if (!iommu_cb_set.cb_info) {
+		CAM_ERR(CAM_SMMU, "Error: cannot allocate context banks");
+		return -ENOMEM;
+	}
+
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_INIT);
+	iommu_cb_set.cb_init_count = 0;
+
+	CAM_DBG(CAM_SMMU, "no of context banks :%d", iommu_cb_set.cb_num);
+	return 0;
+}
+
+static int cam_smmu_get_memory_regions_info(struct device_node *of_node,
+	struct cam_context_bank_info *cb)
+{
+	int rc = 0;
+	struct device_node *mem_map_node = NULL;
+	struct device_node *child_node = NULL;
+	const char *region_name;
+	int num_regions = 0;
+
+	if (!of_node || !cb) {
+		CAM_ERR(CAM_SMMU, "Invalid argument(s)");
+		return -EINVAL;
+	}
+
+	mem_map_node = of_get_child_by_name(of_node, "iova-mem-map");
+	cb->is_secure = of_property_read_bool(of_node, "qcom,secure-cb");
+
+	/*
+	 * We always expect a memory map node, except when it is a secure
+	 * context bank.
+	 */
+	if (!mem_map_node) {
+		if (cb->is_secure)
+			return 0;
+		CAM_ERR(CAM_SMMU, "iova-mem-map not present");
+		return -EINVAL;
+	}
+
+	for_each_available_child_of_node(mem_map_node, child_node) {
+		uint32_t region_start;
+		uint32_t region_len;
+		uint32_t region_id;
+		uint32_t qdss_region_phy_addr = 0;
+
+		num_regions++;
+		rc = of_property_read_string(child_node,
+			"iova-region-name", &region_name);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "IOVA region not found");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-start", &region_start);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-start");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-len", &region_len);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-len");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32(child_node,
+			"iova-region-id", &region_id);
+		if (rc < 0) {
+			of_node_put(mem_map_node);
+			CAM_ERR(CAM_SMMU, "Failed to read iova-region-id");
+			return -EINVAL;
+		}
+
+		if (strcmp(region_name, qdss_region_name) == 0) {
+			rc = of_property_read_u32(child_node,
+				"qdss-phy-addr", &qdss_region_phy_addr);
+			if (rc < 0) {
+				of_node_put(mem_map_node);
+				CAM_ERR(CAM_SMMU,
+					"Failed to read qdss phy addr");
+				return -EINVAL;
+			}
+		}
+
+		switch (region_id) {
+		case CAM_SMMU_REGION_FIRMWARE:
+			cb->firmware_support = 1;
+			cb->firmware_info.iova_start = region_start;
+			cb->firmware_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SHARED:
+			cb->shared_support = 1;
+			cb->shared_info.iova_start = region_start;
+			cb->shared_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SCRATCH:
+			cb->scratch_buf_support = 1;
+			cb->scratch_info.iova_start = region_start;
+			cb->scratch_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_IO:
+			cb->io_support = 1;
+			cb->io_info.iova_start = region_start;
+			cb->io_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_SECHEAP:
+			cb->secheap_support = 1;
+			cb->secheap_info.iova_start = region_start;
+			cb->secheap_info.iova_len = region_len;
+			break;
+		case CAM_SMMU_REGION_QDSS:
+			cb->qdss_support = 1;
+			cb->qdss_info.iova_start = region_start;
+			cb->qdss_info.iova_len = region_len;
+			cb->qdss_phy_addr = qdss_region_phy_addr;
+			break;
+		default:
+			CAM_ERR(CAM_SMMU,
+				"Incorrect region id present in DT file: %d",
+				region_id);
+		}
+
+		CAM_DBG(CAM_SMMU, "Found label -> %s", cb->name);
+		CAM_DBG(CAM_SMMU, "Found region -> %s", region_name);
+		CAM_DBG(CAM_SMMU, "region_start -> %X", region_start);
+		CAM_DBG(CAM_SMMU, "region_len -> %X", region_len);
+		CAM_DBG(CAM_SMMU, "region_id -> %X", region_id);
+	}
+	of_node_put(mem_map_node);
+
+	if (!num_regions) {
+		CAM_ERR(CAM_SMMU,
+			"No memory regions found, at least one needed");
+		rc = -ENODEV;
+	}
+
+	return rc;
+}
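+
+/*
+ * Hedged sketch of the device-tree layout this parser expects; node and
+ * property names are taken from the code above, while the values and the
+ * "ife" label are invented. Region ids follow the cam_smmu_region_id enum
+ * order, so <3> selects CAM_SMMU_REGION_IO.
+ *
+ *	cam_smmu_cb: smmu-cb {
+ *		compatible = "qcom,msm-cam-smmu-cb";
+ *		label = "ife";
+ *		iova-mem-map {
+ *			iova-region-io {
+ *				iova-region-name = "io";
+ *				iova-region-start = <0x7400000>;
+ *				iova-region-len = <0x6400000>;
+ *				iova-region-id = <3>;
+ *			};
+ *		};
+ *	};
+ */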
+
+static int cam_populate_smmu_context_banks(struct device *dev,
+	enum cam_iommu_type type)
+{
+	int rc = 0;
+	struct cam_context_bank_info *cb;
+	struct device *ctx = NULL;
+
+	if (!dev) {
+		CAM_ERR(CAM_SMMU, "Error: Invalid device");
+		return -ENODEV;
+	}
+
+	/* check the bounds */
+	if (iommu_cb_set.cb_init_count >= iommu_cb_set.cb_num) {
+		CAM_ERR(CAM_SMMU, "Error: populate more than allocated cb");
+		rc = -EBADHANDLE;
+		goto cb_init_fail;
+	}
+
+	/* read the context bank from cb set */
+	cb = &iommu_cb_set.cb_info[iommu_cb_set.cb_init_count];
+
+	/* set the name of the context bank */
+	rc = of_property_read_string(dev->of_node, "label", &cb->name);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU,
+			"Error: failed to read label from sub device");
+		goto cb_init_fail;
+	}
+
+	rc = cam_smmu_get_memory_regions_info(dev->of_node, cb);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: Getting region info");
+		return rc;
+	}
+
+	if (cb->is_secure) {
+		/* increment count to next bank */
+		cb->dev = dev;
+		iommu_cb_set.cb_init_count++;
+		return 0;
+	}
+
+	/* set up the iommu mapping for the context bank */
+	if (type == CAM_QSMMU) {
+		CAM_ERR(CAM_SMMU, "Error: QSMMU ctx not supported for: %s",
+			cb->name);
+		return -ENODEV;
+	}
+
+	ctx = dev;
+	CAM_DBG(CAM_SMMU, "getting Arm SMMU ctx : %s", cb->name);
+
+	rc = cam_smmu_setup_cb(cb, ctx);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: failed to setup cb : %s", cb->name);
+		goto cb_init_fail;
+	}
+	if (cb->io_support && cb->mapping)
+		iommu_set_fault_handler(cb->mapping->domain,
+			cam_smmu_iommu_fault_handler,
+			(void *)cb->name);
+	/* increment count to next bank */
+	iommu_cb_set.cb_init_count++;
+
+	CAM_DBG(CAM_SMMU, "X: cb init count :%d", iommu_cb_set.cb_init_count);
+
+cb_init_fail:
+	return rc;
+}
+
+static int cam_smmu_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct device *dev = &pdev->dev;
+
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu")) {
+		rc = cam_alloc_smmu_context_banks(dev);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU, "Error: allocating context banks");
+			return -ENOMEM;
+		}
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_ARM_SMMU);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
+			cam_smmu_release_cb(pdev);
+			return -ENOMEM;
+		}
+		return rc;
+	}
+	if (of_device_is_compatible(dev->of_node, "qcom,qsmmu-cam-cb")) {
+		rc = cam_populate_smmu_context_banks(dev, CAM_QSMMU);
+		if (rc < 0) {
+			CAM_ERR(CAM_SMMU, "Error: populating context banks");
+			return -ENOMEM;
+		}
+		return rc;
+	}
+
+	if (of_device_is_compatible(dev->of_node, "qcom,msm-cam-smmu-fw-dev")) {
+		icp_fw.fw_dev = &pdev->dev;
+		icp_fw.fw_kva = NULL;
+		icp_fw.fw_dma_hdl = 0;
+		return rc;
+	}
+
+	/* probe through all the subdevices */
+	rc = of_platform_populate(pdev->dev.of_node, msm_cam_smmu_dt_match,
+				NULL, &pdev->dev);
+	if (rc < 0) {
+		CAM_ERR(CAM_SMMU, "Error: populating devices");
+	} else {
+		INIT_WORK(&iommu_cb_set.smmu_work, cam_smmu_page_fault_work);
+		mutex_init(&iommu_cb_set.payload_list_lock);
+		INIT_LIST_HEAD(&iommu_cb_set.payload_list);
+	}
+
+	return rc;
+}
+
+static int cam_smmu_remove(struct platform_device *pdev)
+{
+	/* release all the context banks and memory allocated */
+	cam_smmu_reset_iommu_table(CAM_SMMU_TABLE_DEINIT);
+	if (of_device_is_compatible(pdev->dev.of_node, "qcom,msm-cam-smmu"))
+		cam_smmu_release_cb(pdev);
+	return 0;
+}
+
+static struct platform_driver cam_smmu_driver = {
+	.probe = cam_smmu_probe,
+	.remove = cam_smmu_remove,
+	.driver = {
+		.name = "msm_cam_smmu",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_cam_smmu_dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_smmu_init_module(void)
+{
+	return platform_driver_register(&cam_smmu_driver);
+}
+
+static void __exit cam_smmu_exit_module(void)
+{
+	platform_driver_unregister(&cam_smmu_driver);
+}
+
+module_init(cam_smmu_init_module);
+module_exit(cam_smmu_exit_module);
+MODULE_DESCRIPTION("MSM Camera SMMU driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
new file mode 100644
index 0000000..1a73de7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.h
@@ -0,0 +1,384 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SMMU_API_H_
+#define _CAM_SMMU_API_H_
+
+#include <linux/dma-direction.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <asm/dma-iommu.h>
+#include <linux/of_platform.h>
+#include <linux/iommu.h>
+#include <linux/random.h>
+#include <linux/spinlock_types.h>
+#include <linux/mutex.h>
+#include <linux/msm_ion.h>
+
+/* Enum for possible CAM SMMU operations */
+enum cam_smmu_ops_param {
+	CAM_SMMU_ATTACH,
+	CAM_SMMU_DETACH,
+	CAM_SMMU_VOTE,
+	CAM_SMMU_DEVOTE,
+	CAM_SMMU_OPS_INVALID
+};
+
+enum cam_smmu_map_dir {
+	CAM_SMMU_MAP_READ,
+	CAM_SMMU_MAP_WRITE,
+	CAM_SMMU_MAP_RW,
+	CAM_SMMU_MAP_INVALID
+};
+
+enum cam_smmu_region_id {
+	CAM_SMMU_REGION_FIRMWARE,
+	CAM_SMMU_REGION_SHARED,
+	CAM_SMMU_REGION_SCRATCH,
+	CAM_SMMU_REGION_IO,
+	CAM_SMMU_REGION_SECHEAP,
+	CAM_SMMU_REGION_QDSS
+};
+
+/**
+ * @brief        : Callback function type invoked on a CAM SMMU page
+ *                 fault.
+ *
+ * @param domain   : Iommu domain received in iommu page fault handler
+ * @param dev      : Device received in iommu page fault handler
+ * @param iova     : IOVA where page fault occurred
+ * @param flags    : Flags received in iommu page fault handler
+ * @param token    : Userdata given during callback registration
+ * @param buf_info : Closest mapped buffer info
+ */
+typedef void (*cam_smmu_client_page_fault_handler)(struct iommu_domain *domain,
+	struct device *dev, unsigned long iova, int flags, void *token,
+	uint32_t buf_info);
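+
+/*
+ * A minimal sketch of a client handler matching the typedef above; the
+ * body is illustrative and "my_fault_handler" is a hypothetical name. It
+ * would be registered via cam_smmu_set_client_page_fault_handler().
+ *
+ *	static void my_fault_handler(struct iommu_domain *domain,
+ *		struct device *dev, unsigned long iova, int flags,
+ *		void *token, uint32_t buf_info)
+ *	{
+ *		pr_err("cam smmu fault at iova 0x%lx, buf %u\n",
+ *			iova, buf_info);
+ *	}
+ */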
+
+/**
+ * @brief            : Structure to store region information
+ *
+ * @param iova_start : Start address of region
+ * @param iova_len   : length of region
+ */
+struct cam_smmu_region_info {
+	dma_addr_t iova_start;
+	size_t iova_len;
+};
+
+/**
+ * @brief           : Gets an smmu handle
+ *
+ * @param identifier: Unique identifier to be used by clients which they
+ *                    should get from device tree. CAM SMMU driver will
+ *                    not enforce how this string is obtained and will
+ *                    only validate this against the list of permitted
+ *                    identifiers
+ * @param handle_ptr: Based on the identifier, CAM SMMU driver will
+ *                    fill the handle pointed to by handle_ptr
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_handle(char *identifier, int *handle_ptr);
+
+/**
+ * @brief       : Performs IOMMU operations
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param op    : Operation to be performed. Can be either CAM_SMMU_ATTACH
+ *                or CAM_SMMU_DETACH
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_ops(int handle, enum cam_smmu_ops_param op);
+
+/**
+ * @brief       : Maps user space IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ * @dir         : Mapping direction, which will translate to
+ *                DMA_BIDIRECTIONAL, DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @dma_addr    : Pointer to physical address where mapped address will be
+ *                returned if region_id is CAM_SMMU_REGION_IO. If region_id is
+ *                CAM_SMMU_REGION_SHARED, dma_addr is used as an input parameter
+ *                which specifies the cpu virtual address to map.
+ * @len_ptr     : Length of buffer mapped returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_user_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir,
+	dma_addr_t *dma_addr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
+
+/**
+ * @brief        : Maps kernel space IOVA for calling driver
+ *
+ * @param handle : Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param buf    : dma_buf allocated for kernel usage in mem_mgr
+ * @dir          : Mapping direction, which will translate to
+ *                 DMA_BIDIRECTIONAL, DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @dma_addr     : Pointer to physical address where mapped address will be
+ *                 returned if region_id is CAM_SMMU_REGION_IO. If region_id is
+ *                 CAM_SMMU_REGION_SHARED, dma_addr is used as an input
+ *                 parameter which specifies the cpu virtual address to map.
+ * @len_ptr      : Length of buffer mapped returned by CAM SMMU driver.
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_kernel_iova(int handle,
+	struct dma_buf *buf, enum cam_smmu_map_dir dir,
+	dma_addr_t *dma_addr, size_t *len_ptr,
+	enum cam_smmu_region_id region_id);
+
+/**
+ * @brief       : Unmaps user space IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param ion_fd: ION handle identifying the memory buffer.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_user_iova(int handle,
+	int ion_fd, enum cam_smmu_region_id region_id);
+
+/**
+ * @brief       : Unmaps kernel IOVA for calling driver
+ *
+ * @param handle: Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param buf   : dma_buf allocated for the kernel
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_kernel_iova(int handle,
+	struct dma_buf *buf, enum cam_smmu_region_id region_id);
+
+/**
+ * @brief          : Allocates a scratch buffer
+ *
+ * This function allocates a scratch virtual buffer of length virt_len in the
+ * device virtual address space mapped to phys_len physically contiguous bytes
+ * in that device's SMMU.
+ *
+ * virt_len and phys_len are expected to be aligned to PAGE_SIZE and with each
+ * other, otherwise -EINVAL is returned.
+ *
+ * -EINVAL will be returned if virt_len is less than phys_len.
+ *
+ * Passing a too large phys_len might also cause failure if that much size is
+ * not available for allocation in a physically contiguous way.
+ *
+ * @param handle   : Handle to identify the CAMSMMU client (VFE, CPP, FD etc.)
+ * @param dir      : Direction of mapping which will translate to IOMMU_READ
+ *                   IOMMU_WRITE or a bit mask of both.
+ * @param paddr_ptr: Device virtual address that the client device will be
+ *                   able to read from/write to
+ * @param virt_len : Virtual length of the scratch buffer
+ * @param phys_len : Physical length of the scratch buffer
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_scratch_iova(int handle,
+	enum cam_smmu_map_dir dir,
+	dma_addr_t *paddr_ptr,
+	size_t virt_len,
+	size_t phys_len);
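+
+/*
+ * Hedged usage sketch for the scratch API, assuming "hdl" is a valid
+ * handle on a context bank with a scratch region; the lengths follow the
+ * alignment rules described above (both PAGE_SIZE aligned, virt >= phys).
+ *
+ *	dma_addr_t scratch_iova;
+ *
+ *	if (!cam_smmu_get_scratch_iova(hdl, CAM_SMMU_MAP_RW, &scratch_iova,
+ *		2 * PAGE_SIZE, PAGE_SIZE))
+ *		cam_smmu_put_scratch_iova(hdl, scratch_iova);
+ */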
+
+/**
+ * @brief          : Frees a scratch buffer
+ *
+ * This function frees a scratch buffer and releases the corresponding SMMU
+ * mappings.
+ *
+ * @param handle   : Handle to identify the CAMSMMU client (IFE, ICP, etc.)
+ * @param paddr    : Device virtual address of client's scratch buffer that
+ *                   will be freed.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_scratch_iova(int handle,
+	dma_addr_t paddr);
+
+/**
+ * @brief       : Destroys an smmu handle
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_destroy_handle(int handle);
+
+/**
+ * @brief       : Finds index by handle in the smmu client table
+ *
+ * @param hdl: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @return Index of SMMU client. Negative in case of error.
+ */
+int cam_smmu_find_index_by_handle(int hdl);
+
+/**
+ * @brief       : Registers smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param handler_cb: Callback invoked when an IOMMU page fault occurs
+ * @param token: Client data passed back to the handler on a fault
+ */
+void cam_smmu_set_client_page_fault_handler(int handle,
+	cam_smmu_client_page_fault_handler handler_cb, void *token);
+
+/**
+ * @brief       : Unregisters smmu fault handler for client
+ *
+ * @param handle: Handle to identify the CAM SMMU client (VFE, CPP, FD etc.)
+ * @param token: Client data that was passed when the handler was registered
+ */
+void cam_smmu_unset_client_page_fault_handler(int handle, void *token);
+
+/**
+ * @brief Gets the IOVA of an ION fd already mapped on a context bank
+ *
+ * @param handle: SMMU handle identifying the context bank
+ * @param ion_fd: ION fd of the previously mapped memory
+ * @param paddr_ptr: Pointer to the IOVA address that will be returned
+ * @param len_ptr: Length of the mapped memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr);
+
+/**
+ * @brief Gets the IOVA of an ION fd already mapped on a secure context bank
+ *
+ * @param handle: SMMU handle identifying the secure context bank
+ * @param ion_fd: ION fd of the previously mapped memory
+ * @param paddr_ptr: Pointer to the IOVA address that will be returned
+ * @param len_ptr: Length of the mapped memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_stage2_iova(int handle, int ion_fd,
+	dma_addr_t *paddr_ptr, size_t *len_ptr);
+
+/**
+ * @brief Unmaps memory from context bank
+ *
+ * @param handle: SMMU handle identifying the context bank
+ * @param ion_fd: ION fd of memory to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_put_iova(int handle, int ion_fd);
+
+/**
+ * @brief Maps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying secure context bank
+ * @param ion_fd: ION fd to map securely
+ * @param dir: DMA Direction for the mapping
+ * @param dma_addr: Returned IOVA address after mapping
+ * @param len_ptr: Length of memory mapped
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_map_stage2_iova(int handle,
+	int ion_fd, enum cam_smmu_map_dir dir, dma_addr_t *dma_addr,
+	size_t *len_ptr);
+
+/**
+ * @brief Unmaps secure memory for SMMU handle
+ *
+ * @param handle: SMMU handle identifying secure context bank
+ * @param ion_fd: ION fd to unmap
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_unmap_stage2_iova(int handle, int ion_fd);
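+
+/*
+ * Hedged secure-buffer sketch combining the two stage-2 calls above;
+ * "sec_hdl" and "sec_fd" are hypothetical, and the handle must reference
+ * a secure context bank.
+ *
+ *	dma_addr_t iova;
+ *	size_t len;
+ *
+ *	if (!cam_smmu_map_stage2_iova(sec_hdl, sec_fd, CAM_SMMU_MAP_RW,
+ *		&iova, &len)) {
+ *		...
+ *		cam_smmu_unmap_stage2_iova(sec_hdl, sec_fd);
+ *	}
+ */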
+
+/**
+ * @brief Allocates firmware for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying context bank
+ * @param iova: IOVA address of allocated firmware
+ * @param kvaddr: CPU mapped address of allocated firmware
+ * @param len: Length of allocated firmware memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_alloc_firmware(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	uintptr_t *kvaddr,
+	size_t *len);
+
+/**
+ * @brief Deallocates firmware memory for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_dealloc_firmware(int32_t smmu_hdl);
+
+/**
+ * @brief Gets region information specified by smmu handle and region id
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param region_id: Region id for which information is desired
+ * @param region_info: Struct populated with region information
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_get_region_info(int32_t smmu_hdl,
+	enum cam_smmu_region_id region_id,
+	struct cam_smmu_region_info *region_info);
+
+/**
+ * @brief Reserves secondary heap
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ * @param iova: IOVA of secondary heap after reservation has completed
+ * @param buf: Allocated dma_buf for secondary heap
+ * @param request_len: Length of secondary heap after reservation has completed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_reserve_sec_heap(int32_t smmu_hdl,
+	struct dma_buf *buf,
+	dma_addr_t *iova,
+	size_t *request_len);
+
+/**
+ * @brief Releases secondary heap
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_release_sec_heap(int32_t smmu_hdl);
+
+/**
+ * @brief Allocates qdss for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying context bank
+ * @param iova: IOVA address of allocated qdss
+ * @param len: Length of allocated qdss memory
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_alloc_qdss(int32_t smmu_hdl,
+	dma_addr_t *iova,
+	size_t *len);
+
+/**
+ * @brief Deallocates qdss memory for context bank
+ *
+ * @param smmu_hdl: SMMU handle identifying the context bank
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_smmu_dealloc_qdss(int32_t smmu_hdl);
+
+#endif /* _CAM_SMMU_API_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/Makefile b/drivers/media/platform/msm/camera/cam_sync/Makefile
new file mode 100644
index 0000000..13ae8f1
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_utils
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_sync.o cam_sync_util.o
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
new file mode 100644
index 0000000..01d136b
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync.c
@@ -0,0 +1,1086 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/irqflags.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include "cam_sync_util.h"
+#include "cam_debug_util.h"
+#include "cam_common_util.h"
+
+struct sync_device *sync_dev;
+
+/*
+ * Flag to determine whether to enqueue cb of a
+ * signaled fence onto the workq or invoke it
+ * directly in the same context
+ */
+static bool trigger_cb_without_switch;
+
+int cam_sync_create(int32_t *sync_obj, const char *name)
+{
+	int rc;
+	long idx;
+	bool bit;
+
+	do {
+		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+		if (idx >= CAM_SYNC_MAX_OBJS)
+			return -ENOMEM;
+		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
+		bit = test_and_set_bit(idx, sync_dev->bitmap);
+	} while (bit);
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
+		CAM_SYNC_TYPE_INDV);
+	if (rc) {
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
+		clear_bit(idx, sync_dev->bitmap);
+		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+		return -EINVAL;
+	}
+
+	*sync_obj = idx;
+	CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	return rc;
+}
+
+int cam_sync_register_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	struct sync_callback_info *sync_cb;
+	struct sync_table_row *row = NULL;
+	int status = 0;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
+	if (!sync_cb) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -ENOMEM;
+	}
+
+	/* Trigger callback if sync object is already in SIGNALED state */
+	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
+		(!row->remaining)) {
+		if (trigger_cb_without_switch) {
+			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
+				sync_obj);
+			status = row->state;
+			kfree(sync_cb);
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+			cb_func(sync_obj, status, userdata);
+		} else {
+			sync_cb->callback_func = cb_func;
+			sync_cb->cb_data = userdata;
+			sync_cb->sync_obj = sync_obj;
+			INIT_WORK(&sync_cb->cb_dispatch_work,
+				cam_sync_util_cb_dispatch);
+			sync_cb->status = row->state;
+			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
+				sync_cb->sync_obj);
+			queue_work(sync_dev->work_queue,
+				&sync_cb->cb_dispatch_work);
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		}
+
+		return 0;
+	}
+
+	sync_cb->callback_func = cb_func;
+	sync_cb->cb_data = userdata;
+	sync_cb->sync_obj = sync_obj;
+	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
+	list_add_tail(&sync_cb->list, &row->callback_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+	return 0;
+}
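+
+/*
+ * Hedged sketch of a client callback for the API above. The signature
+ * follows how cb_func is invoked in this file; "my_sync_cb" and "ctx"
+ * are hypothetical.
+ *
+ *	static void my_sync_cb(int32_t sync_obj, int status, void *userdata)
+ *	{
+ *		struct my_ctx *ctx = userdata;
+ *		...
+ *	}
+ *
+ *	rc = cam_sync_register_callback(my_sync_cb, ctx, sync_obj);
+ */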
+
+int cam_sync_deregister_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+	struct sync_callback_info *sync_cb, *temp;
+	bool found = false;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
+		sync_obj);
+	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
+		if (sync_cb->callback_func == cb_func &&
+			sync_cb->cb_data == userdata) {
+			list_del_init(&sync_cb->list);
+			kfree(sync_cb);
+			found = true;
+		}
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return found ? 0 : -ENOENT;
+}
+
+int cam_sync_signal(int32_t sync_obj, uint32_t status)
+{
+	struct sync_table_row *row = NULL;
+	struct sync_table_row *parent_row = NULL;
+	struct sync_parent_info *parent_info, *temp_parent_info;
+	struct list_head parents_list;
+	int rc = 0;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
+		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
+			sync_obj, CAM_SYNC_MAX_OBJS);
+		return -EINVAL;
+	}
+	row = sync_dev->sync_table + sync_obj;
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	if (row->type == CAM_SYNC_TYPE_GROUP) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: Signaling a GROUP sync object = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	if (row->state != CAM_SYNC_STATE_ACTIVE) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: Sync object already signaled sync_obj = %d",
+			sync_obj);
+		return -EALREADY;
+	}
+
+	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
+		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: signaling with undefined status = %d",
+			status);
+		return -EINVAL;
+	}
+
+	if (!atomic_dec_and_test(&row->ref_cnt)) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return 0;
+	}
+
+	row->state = status;
+	cam_sync_util_dispatch_signaled_cb(sync_obj, status);
+
+	/* copy parent list to local and release child lock */
+	INIT_LIST_HEAD(&parents_list);
+	list_splice_init(&row->parents_list, &parents_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+
+	if (list_empty(&parents_list))
+		return 0;
+
+	/*
+	 * Now iterate over all parents of this object and if they too need to
+	 * be signaled dispatch cb's
+	 */
+	list_for_each_entry_safe(parent_info,
+		temp_parent_info,
+		&parents_list,
+		list) {
+		parent_row = sync_dev->sync_table + parent_info->sync_id;
+		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+		parent_row->remaining--;
+
+		rc = cam_sync_util_update_parent_state(
+			parent_row,
+			status);
+		if (rc) {
+			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
+				parent_row->state);
+			spin_unlock_bh(
+				&sync_dev->row_spinlocks[parent_info->sync_id]);
+			kfree(parent_info);
+			continue;
+		}
+
+		if (!parent_row->remaining)
+			cam_sync_util_dispatch_signaled_cb(
+				parent_info->sync_id, parent_row->state);
+
+		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+		list_del_init(&parent_info->list);
+		kfree(parent_info);
+	}
+
+	return 0;
+}
+
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
+{
+	int rc;
+	long idx = 0;
+	bool bit;
+
+	if (!sync_obj || !merged_obj) {
+		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
+		return -EINVAL;
+	}
+
+	if (num_objs <= 1) {
+		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
+		return -EINVAL;
+	}
+
+	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
+		!= num_objs) {
+		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
+		return -EINVAL;
+	}
+
+	do {
+		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+		if (idx >= CAM_SYNC_MAX_OBJS)
+			return -ENOMEM;
+		bit = test_and_set_bit(idx, sync_dev->bitmap);
+	} while (bit);
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	rc = cam_sync_init_group_object(sync_dev->sync_table,
+		idx, sync_obj,
+		num_objs);
+	if (rc < 0) {
+		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
+			idx);
+		clear_bit(idx, sync_dev->bitmap);
+		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+		return -EINVAL;
+	}
+	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
+	*merged_obj = idx;
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	return 0;
+}
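+
+/*
+ * Hedged usage sketch: "a" and "b" are assumed to be valid, distinct sync
+ * objects. Per cam_sync_signal() above, the merged object only dispatches
+ * its callbacks once every child has signaled.
+ *
+ *	int32_t objs[2] = { a, b };
+ *	int32_t merged;
+ *
+ *	rc = cam_sync_merge(objs, 2, &merged);
+ */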
+
+int cam_sync_get_obj_ref(int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+
+	spin_lock(&sync_dev->row_spinlocks[sync_obj]);
+
+	if (row->state != CAM_SYNC_STATE_ACTIVE) {
+		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	atomic_inc(&row->ref_cnt);
+	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
+	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);
+
+	return 0;
+}
+
+int cam_sync_put_obj_ref(int32_t sync_obj)
+{
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+	atomic_dec(&row->ref_cnt);
+	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);
+
+	return 0;
+}
+
+int cam_sync_destroy(int32_t sync_obj)
+{
+	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
+	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
+}
+
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
+{
+	unsigned long timeleft;
+	int rc = -EINVAL;
+	struct sync_table_row *row = NULL;
+
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
+		return -EINVAL;
+
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		return -EINVAL;
+	}
+
+	timeleft = wait_for_completion_timeout(&row->signaled,
+		msecs_to_jiffies(timeout_ms));
+
+	if (!timeleft) {
+		CAM_ERR(CAM_SYNC,
+			"Error: timed out for sync obj = %d", sync_obj);
+		rc = -ETIMEDOUT;
+	} else {
+		switch (row->state) {
+		case CAM_SYNC_STATE_INVALID:
+		case CAM_SYNC_STATE_ACTIVE:
+		case CAM_SYNC_STATE_SIGNALED_ERROR:
+			CAM_ERR(CAM_SYNC,
+				"Error: Wait on invalid state = %d, obj = %d",
+				row->state, sync_obj);
+			rc = -EINVAL;
+			break;
+		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+			rc = 0;
+			break;
+		default:
+			rc = -EINVAL;
+			break;
+		}
+	}
+
+	return rc;
+}
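+
+/*
+ * Hedged end-to-end sketch of a fence lifecycle; the object is assumed to
+ * be signaled from another context and the 100 ms timeout is arbitrary.
+ *
+ *	int32_t obj;
+ *
+ *	cam_sync_create(&obj, "my-fence");
+ *	...
+ *	rc = cam_sync_wait(obj, 100);
+ *	cam_sync_destroy(obj);
+ */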
+
+static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_info sync_create;
+	int result;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_create,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	result = cam_sync_create(&sync_create.sync_obj,
+		sync_create.name);
+
+	if (!result)
+		if (copy_to_user(
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&sync_create,
+			k_ioctl->size))
+			return -EFAULT;
+
+	return result;
+}
+
+static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_signal sync_signal;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_signal))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_signal,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	/* need to get ref for UMD signaled fences */
+	cam_sync_get_obj_ref(sync_signal.sync_obj);
+	return cam_sync_signal(sync_signal.sync_obj,
+		sync_signal.sync_state);
+}
+
+static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_merge sync_merge;
+	uint32_t *sync_objs;
+	uint32_t num_objs;
+	uint32_t size;
+	int result;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_merge))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_merge,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
+		return -EINVAL;
+
+	size = sizeof(uint32_t) * sync_merge.num_objs;
+	sync_objs = kzalloc(size, GFP_ATOMIC);
+
+	if (!sync_objs)
+		return -ENOMEM;
+
+	if (copy_from_user(sync_objs,
+		u64_to_user_ptr(sync_merge.sync_objs),
+		sizeof(uint32_t) * sync_merge.num_objs)) {
+		kfree(sync_objs);
+		return -EFAULT;
+	}
+
+	num_objs = sync_merge.num_objs;
+
+	result = cam_sync_merge(sync_objs,
+		num_objs,
+		&sync_merge.merged);
+
+	if (!result)
+		if (copy_to_user(
+			u64_to_user_ptr(k_ioctl->ioctl_ptr),
+			&sync_merge,
+			k_ioctl->size)) {
+			kfree(sync_objs);
+			return -EFAULT;
+		}
+
+	kfree(sync_objs);
+
+	return result;
+}
+
+static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_wait sync_wait;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_wait))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_wait,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
+		sync_wait.timeout_ms);
+
+	return 0;
+}
+
+static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_info sync_create;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&sync_create,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	return cam_sync_destroy(sync_create.sync_obj);
+}
+
+static int cam_sync_handle_register_user_payload(
+	struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_userpayload_info userpayload_info;
+	struct sync_user_payload *user_payload_kernel;
+	struct sync_user_payload *user_payload_iter;
+	struct sync_user_payload *temp_upayload_kernel;
+	uint32_t sync_obj;
+	struct sync_table_row *row = NULL;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
+		return -EINVAL;
+
+	if (!k_ioctl->ioctl_ptr)
+		return -EINVAL;
+
+	if (copy_from_user(&userpayload_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	sync_obj = userpayload_info.sync_obj;
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj == 0)
+		return -EINVAL;
+
+	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
+	if (!user_payload_kernel)
+		return -ENOMEM;
+
+	memcpy(user_payload_kernel->payload_data,
+		userpayload_info.payload,
+		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		kfree(user_payload_kernel);
+		return -EINVAL;
+	}
+
+	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
+		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
+
+		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+			sync_obj,
+			row->state,
+			user_payload_kernel->payload_data,
+			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));
+
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		kfree(user_payload_kernel);
+		return 0;
+	}
+
+	list_for_each_entry_safe(user_payload_iter,
+		temp_upayload_kernel,
+		&row->user_payload_list,
+		list) {
+		if (user_payload_iter->payload_data[0] ==
+				user_payload_kernel->payload_data[0] &&
+			user_payload_iter->payload_data[1] ==
+				user_payload_kernel->payload_data[1]) {
+
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+			kfree(user_payload_kernel);
+			return -EALREADY;
+		}
+	}
+
+	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return 0;
+}
+
+static int cam_sync_handle_deregister_user_payload(
+	struct cam_private_ioctl_arg *k_ioctl)
+{
+	struct cam_sync_userpayload_info userpayload_info;
+	struct sync_user_payload *user_payload_kernel, *temp;
+	uint32_t sync_obj;
+	struct sync_table_row *row = NULL;
+
+	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
+		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
+		return -EINVAL;
+	}
+
+	if (!k_ioctl->ioctl_ptr) {
+		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&userpayload_info,
+		u64_to_user_ptr(k_ioctl->ioctl_ptr),
+		k_ioctl->size))
+		return -EFAULT;
+
+	sync_obj = userpayload_info.sync_obj;
+	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj == 0)
+		return -EINVAL;
+
+	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	row = sync_dev->sync_table + sync_obj;
+
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj = %d",
+			sync_obj);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+		return -EINVAL;
+	}
+
+	list_for_each_entry_safe(user_payload_kernel, temp,
+				&row->user_payload_list, list) {
+		if (user_payload_kernel->payload_data[0] ==
+				userpayload_info.payload[0] &&
+				user_payload_kernel->payload_data[1] ==
+				userpayload_info.payload[1]) {
+			list_del_init(&user_payload_kernel->list);
+			kfree(user_payload_kernel);
+		}
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
+	return 0;
+}
+
+static long cam_sync_dev_ioctl(struct file *filep, void *fh,
+		bool valid_prio, unsigned int cmd, void *arg)
+{
+	int32_t rc;
+	struct sync_device *sync_dev = video_drvdata(filep);
+	struct cam_private_ioctl_arg k_ioctl;
+
+	if (!sync_dev) {
+		CAM_ERR(CAM_SYNC, "sync_dev NULL");
+		return -EINVAL;
+	}
+
+	if (!arg)
+		return -EINVAL;
+
+	if (cmd != CAM_PRIVATE_IOCTL_CMD)
+		return -ENOIOCTLCMD;
+
+	k_ioctl = *(struct cam_private_ioctl_arg *)arg;
+
+	switch (k_ioctl.id) {
+	case CAM_SYNC_CREATE:
+		rc = cam_sync_handle_create(&k_ioctl);
+		break;
+	case CAM_SYNC_DESTROY:
+		rc = cam_sync_handle_destroy(&k_ioctl);
+		break;
+	case CAM_SYNC_REGISTER_PAYLOAD:
+		rc = cam_sync_handle_register_user_payload(
+			&k_ioctl);
+		break;
+	case CAM_SYNC_DEREGISTER_PAYLOAD:
+		rc = cam_sync_handle_deregister_user_payload(
+			&k_ioctl);
+		break;
+	case CAM_SYNC_SIGNAL:
+		rc = cam_sync_handle_signal(&k_ioctl);
+		break;
+	case CAM_SYNC_MERGE:
+		rc = cam_sync_handle_merge(&k_ioctl);
+		break;
+	case CAM_SYNC_WAIT:
+		rc = cam_sync_handle_wait(&k_ioctl);
+		((struct cam_private_ioctl_arg *)arg)->result =
+			k_ioctl.result;
+		break;
+	default:
+		rc = -ENOIOCTLCMD;
+	}
+
+	return rc;
+}
+
+static unsigned int cam_sync_poll(struct file *f,
+	struct poll_table_struct *pll_table)
+{
+	int rc = 0;
+	struct v4l2_fh *eventq = f->private_data;
+
+	if (!eventq)
+		return -EINVAL;
+
+	poll_wait(f, &eventq->wait, pll_table);
+
+	if (v4l2_event_pending(eventq))
+		rc = POLLPRI;
+
+	return rc;
+}
+
+static int cam_sync_open(struct file *filep)
+{
+	int rc;
+	struct sync_device *sync_dev = video_drvdata(filep);
+
+	if (!sync_dev) {
+		CAM_ERR(CAM_SYNC, "Sync device NULL");
+		return -ENODEV;
+	}
+
+	mutex_lock(&sync_dev->table_lock);
+	if (sync_dev->open_cnt >= 1) {
+		mutex_unlock(&sync_dev->table_lock);
+		return -EALREADY;
+	}
+
+	rc = v4l2_fh_open(filep);
+	if (!rc) {
+		sync_dev->open_cnt++;
+		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+		sync_dev->cam_sync_eventq = filep->private_data;
+		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+	} else {
+		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
+	}
+	mutex_unlock(&sync_dev->table_lock);
+
+	return rc;
+}
+
+static int cam_sync_close(struct file *filep)
+{
+	int rc = 0;
+	int i;
+	struct sync_device *sync_dev = video_drvdata(filep);
+
+	if (!sync_dev) {
+		CAM_ERR(CAM_SYNC, "Sync device NULL");
+		rc = -ENODEV;
+		return rc;
+	}
+	mutex_lock(&sync_dev->table_lock);
+	sync_dev->open_cnt--;
+	if (!sync_dev->open_cnt) {
+		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
+			struct sync_table_row *row =
+				sync_dev->sync_table + i;
+
+			/*
+			 * Signal all ACTIVE objects as ERR, but we don't
+			 * care about the return status here apart from logging
+			 * it.
+			 */
+			if (row->state == CAM_SYNC_STATE_ACTIVE) {
+				rc = cam_sync_signal(i,
+					CAM_SYNC_STATE_SIGNALED_ERROR);
+				if (rc < 0)
+					CAM_ERR(CAM_SYNC,
+					  "Cleanup signal fail idx:%d",
+					  i);
+			}
+		}
+
+		/*
+		 * Flush the work queue to wait for pending signal callbacks to
+		 * finish
+		 */
+		flush_workqueue(sync_dev->work_queue);
+
+		/*
+		 * Now that all callbacks worker threads have finished,
+		 * destroy the sync objects
+		 */
+		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
+			struct sync_table_row *row =
+				sync_dev->sync_table + i;
+
+			if (row->state != CAM_SYNC_STATE_INVALID) {
+				rc = cam_sync_destroy(i);
+				if (rc < 0)
+					CAM_ERR(CAM_SYNC,
+					  "Cleanup destroy fail idx:%d",
+					  i);
+			}
+		}
+	}
+	mutex_unlock(&sync_dev->table_lock);
+	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+	sync_dev->cam_sync_eventq = NULL;
+	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+	v4l2_fh_release(filep);
+
+	return rc;
+}
+
+int cam_sync_subscribe_event(struct v4l2_fh *fh,
+		const struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS, NULL);
+}
+
+int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
+		const struct v4l2_event_subscription *sub)
+{
+	return v4l2_event_unsubscribe(fh, sub);
+}
+
+static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
+	.vidioc_subscribe_event = cam_sync_subscribe_event,
+	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
+	.vidioc_default = cam_sync_dev_ioctl,
+};
+
+static struct v4l2_file_operations cam_sync_v4l2_fops = {
+	.owner = THIS_MODULE,
+	.open  = cam_sync_open,
+	.release = cam_sync_close,
+	.poll = cam_sync_poll,
+	.unlocked_ioctl   = video_ioctl2,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl32 = video_ioctl2,
+#endif
+};
+
+#if defined(CONFIG_MEDIA_CONTROLLER)
+static int cam_sync_media_controller_init(struct sync_device *sync_dev,
+	struct platform_device *pdev)
+{
+	int rc;
+
+	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
+		GFP_KERNEL);
+	if (!sync_dev->v4l2_dev.mdev)
+		return -ENOMEM;
+
+	media_device_init(sync_dev->v4l2_dev.mdev);
+	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
+			sizeof(sync_dev->v4l2_dev.mdev->model));
+	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);
+
+	rc = media_device_register(sync_dev->v4l2_dev.mdev);
+	if (rc < 0)
+		goto register_fail;
+
+	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
+	if (rc < 0)
+		goto entity_fail;
+
+	return 0;
+
+entity_fail:
+	media_device_unregister(sync_dev->v4l2_dev.mdev);
+register_fail:
+	media_device_cleanup(sync_dev->v4l2_dev.mdev);
+	return rc;
+}
+
+static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
+{
+	media_entity_cleanup(&sync_dev->vdev->entity);
+	media_device_unregister(sync_dev->v4l2_dev.mdev);
+	media_device_cleanup(sync_dev->v4l2_dev.mdev);
+	kfree(sync_dev->v4l2_dev.mdev);
+}
+
+static void cam_sync_init_entity(struct sync_device *sync_dev)
+{
+	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
+	sync_dev->vdev->entity.name =
+				video_device_node_name(sync_dev->vdev);
+}
+#else
+static int cam_sync_media_controller_init(struct sync_device *sync_dev,
+	struct platform_device *pdev)
+{
+	return 0;
+}
+
+static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
+{
+}
+
+static void cam_sync_init_entity(struct sync_device *sync_dev)
+{
+}
+#endif
+
+static int cam_sync_create_debugfs(void)
+{
+	sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);
+
+	if (!sync_dev->dentry) {
+		CAM_ERR(CAM_SYNC, "Failed to create sync dir");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_bool("trigger_cb_without_switch",
+		0644, sync_dev->dentry,
+		&trigger_cb_without_switch)) {
+		CAM_ERR(CAM_SYNC,
+			"failed to create trigger_cb_without_switch entry");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
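+/*
+ * Illustrative usage of the debugfs knob created above (the path assumes
+ * debugfs is mounted at /sys/kernel/debug):
+ *
+ *	echo 1 > /sys/kernel/debug/camera_sync/trigger_cb_without_switch
+ */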
+
+static int cam_sync_probe(struct platform_device *pdev)
+{
+	int rc;
+	int idx;
+
+	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
+	if (!sync_dev)
+		return -ENOMEM;
+
+	mutex_init(&sync_dev->table_lock);
+	spin_lock_init(&sync_dev->cam_sync_eventq_lock);
+
+	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
+		spin_lock_init(&sync_dev->row_spinlocks[idx]);
+
+	sync_dev->vdev = video_device_alloc();
+	if (!sync_dev->vdev) {
+		rc = -ENOMEM;
+		goto vdev_fail;
+	}
+
+	rc = cam_sync_media_controller_init(sync_dev, pdev);
+	if (rc < 0)
+		goto mcinit_fail;
+
+	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;
+
+	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
+	if (rc < 0)
+		goto register_fail;
+
+	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
+				sizeof(sync_dev->vdev->name));
+	sync_dev->vdev->release  = video_device_release;
+	sync_dev->vdev->fops     = &cam_sync_v4l2_fops;
+	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
+	sync_dev->vdev->minor     = -1;
+	sync_dev->vdev->vfl_type  = VFL_TYPE_GRABBER;
+	rc = video_register_device(sync_dev->vdev,
+		VFL_TYPE_GRABBER, -1);
+	if (rc < 0)
+		goto v4l2_fail;
+
+	cam_sync_init_entity(sync_dev);
+	video_set_drvdata(sync_dev->vdev, sync_dev);
+	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
+	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
+	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+
+	/*
+	 * We treat zero as invalid handle, so we will keep the 0th bit set
+	 * always
+	 */
+	set_bit(0, sync_dev->bitmap);
+
+	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
+		WQ_HIGHPRI | WQ_UNBOUND, 1);
+
+	if (!sync_dev->work_queue) {
+		CAM_ERR(CAM_SYNC,
+			"Error: high priority work queue creation failed");
+		rc = -ENOMEM;
+		goto v4l2_fail;
+	}
+
+	trigger_cb_without_switch = false;
+	cam_sync_create_debugfs();
+
+	return rc;
+
+v4l2_fail:
+	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
+register_fail:
+	cam_sync_media_controller_cleanup(sync_dev);
+mcinit_fail:
+	video_device_release(sync_dev->vdev);
+vdev_fail:
+	mutex_destroy(&sync_dev->table_lock);
+	kfree(sync_dev);
+	return rc;
+}
+
+static int cam_sync_remove(struct platform_device *pdev)
+{
+	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
+	cam_sync_media_controller_cleanup(sync_dev);
+	video_device_release(sync_dev->vdev);
+	debugfs_remove_recursive(sync_dev->dentry);
+	sync_dev->dentry = NULL;
+	kfree(sync_dev);
+	sync_dev = NULL;
+
+	return 0;
+}
+
+static struct platform_device cam_sync_device = {
+	.name = "cam_sync",
+	.id = -1,
+};
+
+static struct platform_driver cam_sync_driver = {
+	.probe = cam_sync_probe,
+	.remove = cam_sync_remove,
+	.driver = {
+		.name = "cam_sync",
+		.owner = THIS_MODULE,
+		.suppress_bind_attrs = true,
+	},
+};
+
+static int __init cam_sync_init(void)
+{
+	int rc;
+
+	rc = platform_device_register(&cam_sync_device);
+	if (rc)
+		return -ENODEV;
+
+	return platform_driver_register(&cam_sync_driver);
+}
+
+static void __exit cam_sync_exit(void)
+{
+	platform_driver_unregister(&cam_sync_driver);
+	platform_device_unregister(&cam_sync_device);
+}
+
+module_init(cam_sync_init);
+module_exit(cam_sync_exit);
+MODULE_DESCRIPTION("Camera sync driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h
new file mode 100644
index 0000000..75640e73
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_api.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CAM_SYNC_API_H__
+#define __CAM_SYNC_API_H__
+
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/videodev2.h>
+#include <uapi/media/cam_sync.h>
+
+#define SYNC_DEBUG_NAME_LEN 63
+typedef void (*sync_callback)(int32_t sync_obj, int status, void *data);
+
+/* Kernel APIs */
+
+/**
+ * @brief: Creates a sync object
+ *
+ *  The newly created sync object is assigned to sync_obj.
+ *
+ * @param sync_obj   : Pointer to int referencing the sync object.
+ * @param name : Optional parameter associating a name with the sync object for
+ * debug purposes. Only the first SYNC_DEBUG_NAME_LEN bytes are accepted;
+ * the rest will be ignored.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if sync_obj is an invalid pointer.
+ * -ENOMEM will be returned if the kernel can't allocate space for
+ * sync object.
+ */
+int cam_sync_create(int32_t *sync_obj, const char *name);
+
+/**
+ * @brief: Registers a callback with a sync object
+ *
+ * @param cb_func:  Pointer to callback to be registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if sync_obj or cb_func is invalid.
+ * -ENOMEM will be returned if memory cannot be allocated for the callback.
+ *
+ */
+int cam_sync_register_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: De-registers a callback with a sync object
+ *
+ * @param cb_func:  Pointer to callback to be de-registered
+ * @param userdata: Opaque pointer which will be passed back with callback.
+ * @param sync_obj: int referencing the sync object.
+ *
+ * @return Status of operation. Zero in case of success.
+ * -EINVAL will be returned if sync_obj or cb_func is invalid.
+ */
+int cam_sync_deregister_callback(sync_callback cb_func,
+	void *userdata, int32_t sync_obj);
+
+/**
+ * @brief: Signals a sync object with the status argument.
+ *
+ * This function will signal the sync object referenced by the sync_obj
+ * parameter and when doing so, will trigger callbacks in both user space and
+ * kernel. Callbacks will be triggered asynchronously and their order of execution
+ * is not guaranteed. The status parameter will indicate whether the entity
+ * performing the signaling wants to convey an error case or a success case.
+ *
+ * @param sync_obj: int referencing the sync object.
+ * @param status: Status of the signaling. Can be either
+ * CAM_SYNC_STATE_SIGNALED_SUCCESS or CAM_SYNC_STATE_SIGNALED_ERROR.
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_signal(int32_t sync_obj, uint32_t status);
+
+/**
+ * @brief: Merges multiple sync objects
+ *
+ * This function will merge multiple sync objects into a sync group.
+ *
+ * @param sync_obj   : Pointer to a block of ints to be merged
+ * @param num_objs   : Number of ints in the block
+ * @param merged_obj : Pointer to int referencing the merged sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj);
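+/*
+ * Illustrative sketch (not part of the driver sources): merging two
+ * previously created sync objects into a group object. Variable names are
+ * hypothetical and error handling is trimmed.
+ *
+ *	int32_t objs[2], merged;
+ *
+ *	cam_sync_create(&objs[0], "frame_done");
+ *	cam_sync_create(&objs[1], "buf_done");
+ *	cam_sync_merge(objs, 2, &merged);
+ *
+ * The merged object signals SUCCESS once every child has signaled SUCCESS,
+ * or ERROR if any child signals ERROR.
+ */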
+
+/**
+ * @brief: get ref count of sync obj
+ *
+ * This function will increment ref count for the sync object, and the ref
+ * count will be decremented when this sync object is signaled.
+ *
+ * @param sync_obj: sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_get_obj_ref(int32_t sync_obj);
+
+/**
+ * @brief: put ref count of sync obj
+ *
+ * This function will decrement ref count for the sync object.
+ *
+ * @param sync_obj: sync object
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_put_obj_ref(int32_t sync_obj);
+
+/**
+ * @brief: Destroys a sync object
+ *
+ * @param sync_obj: int referencing the sync object to be destroyed
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_destroy(int32_t sync_obj);
+
+/**
+ * @brief: Waits for a sync object synchronously
+ *
+ * Does a wait on the sync object identified by sync_obj for a maximum
+ * of timeout_ms milliseconds. Must not be called from interrupt context as
+ * this API can sleep. Should be called from process context only.
+ *
+ * @param sync_obj: int referencing the sync object to be waited upon
+ * @param timeout_ms: Timeout in ms.
+ *
+ * @return 0 upon success, -EINVAL if sync object is in bad state or arguments
+ * are invalid, -ETIMEDOUT if wait times out.
+ */
+int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms);
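+/*
+ * Illustrative end-to-end sketch of the API above. This is not part of the
+ * driver sources; the callback, names and timeout below are hypothetical
+ * and error handling is trimmed.
+ *
+ *	static void demo_cb(int32_t sync_obj, int status, void *data)
+ *	{
+ *		pr_info("obj %d signaled with status %d\n", sync_obj, status);
+ *	}
+ *
+ *	int32_t obj;
+ *
+ *	cam_sync_create(&obj, "demo");
+ *	cam_sync_register_callback(demo_cb, NULL, obj);
+ *	cam_sync_signal(obj, CAM_SYNC_STATE_SIGNALED_SUCCESS);
+ *	cam_sync_wait(obj, 500);
+ *	cam_sync_destroy(obj);
+ */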
+
+
+#endif /* __CAM_SYNC_API_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
new file mode 100644
index 0000000..b13bfaf
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_private.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CAM_SYNC_PRIVATE_H__
+#define __CAM_SYNC_PRIVATE_H__
+
+#include <linux/bitmap.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/debugfs.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+
+#ifdef CONFIG_CAM_SYNC_DBG
+#define CDBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define CDBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+
+#define CAM_SYNC_OBJ_NAME_LEN           64
+#define CAM_SYNC_MAX_OBJS               1024
+#define CAM_SYNC_MAX_V4L2_EVENTS        50
+#define CAM_SYNC_DEBUG_FILENAME         "cam_debug"
+#define CAM_SYNC_DEBUG_BASEDIR          "cam"
+#define CAM_SYNC_DEBUG_BUF_SIZE         32
+#define CAM_SYNC_PAYLOAD_WORDS          2
+#define CAM_SYNC_NAME                   "cam_sync"
+#define CAM_SYNC_WORKQUEUE_NAME         "HIPRIO_SYNC_WORK_QUEUE"
+
+#define CAM_SYNC_TYPE_INDV              0
+#define CAM_SYNC_TYPE_GROUP             1
+
+/**
+ * enum sync_type - Enum to indicate the type of sync object,
+ * i.e. individual or group.
+ *
+ * @SYNC_TYPE_INDV  : Object is an individual sync object
+ * @SYNC_TYPE_GROUP : Object is a group sync object
+ */
+enum sync_type {
+	SYNC_TYPE_INDV,
+	SYNC_TYPE_GROUP
+};
+
+/**
+ * enum sync_list_clean_type - Enum to indicate the type of list clean action
+ * to be performed, i.e. clean a specific sync ID or all sync IDs in the list.
+ *
+ * @SYNC_CLEAN_ONE : Specific object to be cleaned in the list
+ * @SYNC_CLEAN_ALL : Clean all objects in the list
+ */
+enum sync_list_clean_type {
+	SYNC_LIST_CLEAN_ONE,
+	SYNC_LIST_CLEAN_ALL
+};
+
+/**
+ * struct sync_parent_info - Single node of information about a parent
+ * of a sync object, usually part of the parents linked list
+ *
+ * @sync_id  : Sync object id of parent
+ * @list     : List member used to append this node to a linked list
+ */
+struct sync_parent_info {
+	int32_t sync_id;
+	struct list_head list;
+};
+
+/**
+ * struct sync_child_info - Single node of information about a child
+ * of a sync object, usually part of the children linked list
+ *
+ * @sync_id  : Sync object id of child
+ * @list     : List member used to append this node to a linked list
+ */
+struct sync_child_info {
+	int32_t sync_id;
+	struct list_head list;
+};
+
+
+/**
+ * struct sync_callback_info - Single node of information about a kernel
+ * callback registered on a sync object
+ *
+ * @callback_func    : Callback function, registered by client driver
+ * @cb_data          : Callback data, registered by client driver
+ * @status           : Status with which callback will be invoked in client
+ * @sync_obj         : Sync id of the object for which callback is registered
+ * @cb_dispatch_work : Work representing the call dispatch
+ * @list             : List member used to append this node to a linked list
+ */
+struct sync_callback_info {
+	sync_callback callback_func;
+	void *cb_data;
+	int status;
+	int32_t sync_obj;
+	struct work_struct cb_dispatch_work;
+	struct list_head list;
+};
+
+/**
+ * struct sync_user_payload - Single node of information about a user space
+ * payload registered from user space
+ *
+ * @payload_data    : Payload data, opaque to kernel
+ * @list            : List member used to append this node to a linked list
+ */
+struct sync_user_payload {
+	uint64_t payload_data[CAM_SYNC_PAYLOAD_WORDS];
+	struct list_head list;
+};
+
+/**
+ * struct sync_table_row - Single row of information about a sync object, used
+ * for internal bookkeeping in the sync driver
+ *
+ * @name              : Optional string representation of the sync object
+ * @type              : Type of the sync object (individual or group)
+ * @sync_id           : Integer id representing this sync object
+ * @parents_list      : Linked list of parents of this sync object
+ * @children_list     : Linked list of children of this sync object
+ * @state             : State (INVALID, ACTIVE, SIGNALED_SUCCESS or
+ *                      SIGNALED_ERROR)
+ * @remaining         : Count of remaining children that have not been signaled
+ * @signaled          : Completion variable on which blocked calls will wait
+ * @callback_list     : Linked list of kernel callbacks registered
+ * @user_payload_list : Linked list of user space payloads registered
+ * @ref_cnt           : Reference count tracking the number of users of the
+ *                      fence
+ */
+struct sync_table_row {
+	char name[CAM_SYNC_OBJ_NAME_LEN];
+	enum sync_type type;
+	int32_t sync_id;
+	/* List of parents, which are merged objects */
+	struct list_head parents_list;
+	/* List of children, which constitute the merged object */
+	struct list_head children_list;
+	uint32_t state;
+	uint32_t remaining;
+	struct completion signaled;
+	struct list_head callback_list;
+	struct list_head user_payload_list;
+	atomic_t ref_cnt;
+};
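+/*
+ * Illustrative example of the bookkeeping above: merging objects 5 and 7
+ * into group object 9 leaves row 9 with remaining = 2 and children_list =
+ * {5, 7}, while rows 5 and 7 each gain a parents_list entry {9}. Signaling
+ * a child decrements the parent's remaining count; when it reaches zero
+ * the group is signaled and complete_all() is called on its completion.
+ */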
+
+/**
+ * struct cam_signalable_info - Information for a single sync object that is
+ * ready to be signaled
+ *
+ * @sync_obj : Sync object id of signalable object
+ * @status   : Status with which to signal
+ * @list     : List member used to append this node to a linked list
+ */
+struct cam_signalable_info {
+	int32_t sync_obj;
+	uint32_t status;
+	struct list_head list;
+};
+
+/**
+ * struct sync_device - Internal bookkeeping struct for sync driver details
+ *
+ * @vdev            : Video device
+ * @v4l2_dev        : V4L2 device
+ * @sync_table      : Table of all sync objects
+ * @row_spinlocks   : Spinlock array, one for each row in the table
+ * @table_lock      : Mutex used to lock the table
+ * @open_cnt        : Count of file open calls made on the sync driver
+ * @dentry          : Debugfs entry
+ * @work_queue      : Work queue used for dispatching kernel callbacks
+ * @cam_sync_eventq : Event queue used to dispatch user payloads to user space
+ * @bitmap          : Bitmap representation of all sync objects
+ */
+struct sync_device {
+	struct video_device *vdev;
+	struct v4l2_device v4l2_dev;
+	struct sync_table_row sync_table[CAM_SYNC_MAX_OBJS];
+	spinlock_t row_spinlocks[CAM_SYNC_MAX_OBJS];
+	struct mutex table_lock;
+	int open_cnt;
+	struct dentry *dentry;
+	struct workqueue_struct *work_queue;
+	struct v4l2_fh *cam_sync_eventq;
+	spinlock_t cam_sync_eventq_lock;
+	DECLARE_BITMAP(bitmap, CAM_SYNC_MAX_OBJS);
+};
+
+
+#endif /* __CAM_SYNC_PRIVATE_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
new file mode 100644
index 0000000..09986be
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.c
@@ -0,0 +1,450 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "cam_sync_util.h"
+
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+	long *idx)
+{
+	int rc = 0;
+
+	mutex_lock(&sync_dev->table_lock);
+
+	*idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
+
+	if (*idx < CAM_SYNC_MAX_OBJS)
+		set_bit(*idx, sync_dev->bitmap);
+	else
+		rc = -1;
+
+	mutex_unlock(&sync_dev->table_lock);
+
+	return rc;
+}
+
+int cam_sync_init_row(struct sync_table_row *table,
+	uint32_t idx, const char *name, uint32_t type)
+{
+	struct sync_table_row *row;
+
+	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+		return -EINVAL;
+
+	row = table + idx;
+	memset(row, 0, sizeof(*row));
+
+	if (name)
+		strlcpy(row->name, name, SYNC_DEBUG_NAME_LEN);
+	INIT_LIST_HEAD(&row->parents_list);
+	INIT_LIST_HEAD(&row->children_list);
+	row->type = type;
+	row->sync_id = idx;
+	row->state = CAM_SYNC_STATE_ACTIVE;
+	row->remaining = 0;
+	atomic_set(&row->ref_cnt, 0);
+	init_completion(&row->signaled);
+	INIT_LIST_HEAD(&row->callback_list);
+	INIT_LIST_HEAD(&row->user_payload_list);
+	CAM_DBG(CAM_SYNC,
+		"row name:%s sync_id:%i [idx:%u] row_state:%u ",
+		row->name, row->sync_id, idx, row->state);
+
+	return 0;
+}
+
+int cam_sync_init_group_object(struct sync_table_row *table,
+	uint32_t idx,
+	uint32_t *sync_objs,
+	uint32_t num_objs)
+{
+	int i, rc = 0;
+	struct sync_child_info *child_info;
+	struct sync_parent_info *parent_info;
+	struct sync_table_row *row = table + idx;
+	struct sync_table_row *child_row = NULL;
+
+	cam_sync_init_row(table, idx, "merged_fence", CAM_SYNC_TYPE_GROUP);
+
+	/*
+	 * While traversing for children, parent's row list is updated with
+	 * child info and each child's row is updated with parent info.
+	 * A child already in the ERROR or SUCCESS state is not added to the list.
+	 */
+	for (i = 0; i < num_objs; i++) {
+		child_row = table + sync_objs[i];
+		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+
+		/* validate child */
+		if ((child_row->type == CAM_SYNC_TYPE_GROUP) ||
+			(child_row->state == CAM_SYNC_STATE_INVALID)) {
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+			CAM_ERR(CAM_SYNC,
+				"Invalid child fence:%i state:%u type:%u",
+				child_row->sync_id, child_row->state,
+				child_row->type);
+			rc = -EINVAL;
+			goto clean_children_info;
+		}
+
+		/* check for child's state */
+		if (child_row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
+			row->state = CAM_SYNC_STATE_SIGNALED_ERROR;
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+			continue;
+		}
+		if (child_row->state != CAM_SYNC_STATE_ACTIVE) {
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+			continue;
+		}
+
+		row->remaining++;
+
+		/* Add child info */
+		child_info = kzalloc(sizeof(*child_info), GFP_ATOMIC);
+		if (!child_info) {
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+			rc = -ENOMEM;
+			goto clean_children_info;
+		}
+		child_info->sync_id = sync_objs[i];
+		list_add_tail(&child_info->list, &row->children_list);
+
+		/* Add parent info */
+		parent_info = kzalloc(sizeof(*parent_info), GFP_ATOMIC);
+		if (!parent_info) {
+			spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+			rc = -ENOMEM;
+			goto clean_children_info;
+		}
+		parent_info->sync_id = idx;
+		list_add_tail(&parent_info->list, &child_row->parents_list);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+	}
+
+	if (!row->remaining) {
+		if (row->state != CAM_SYNC_STATE_SIGNALED_ERROR)
+			row->state = CAM_SYNC_STATE_SIGNALED_SUCCESS;
+		complete_all(&row->signaled);
+	}
+
+	return 0;
+
+clean_children_info:
+	row->state = CAM_SYNC_STATE_INVALID;
+	for (i = i-1; i >= 0; i--) {
+		spin_lock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+		child_row = table + sync_objs[i];
+		cam_sync_util_cleanup_parents_list(child_row,
+			SYNC_LIST_CLEAN_ONE, idx);
+		spin_unlock_bh(&sync_dev->row_spinlocks[sync_objs[i]]);
+	}
+
+	cam_sync_util_cleanup_children_list(row, SYNC_LIST_CLEAN_ALL, 0);
+	return rc;
+}
+
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx)
+{
+	struct sync_table_row      *row;
+	struct sync_child_info     *child_info, *temp_child;
+	struct sync_callback_info  *sync_cb, *temp_cb;
+	struct sync_parent_info    *parent_info, *temp_parent;
+	struct sync_user_payload   *upayload_info, *temp_upayload;
+	struct sync_table_row      *child_row = NULL, *parent_row = NULL;
+	struct list_head            temp_child_list, temp_parent_list;
+
+	if (!table || idx <= 0 || idx >= CAM_SYNC_MAX_OBJS)
+		return -EINVAL;
+
+	row = table + idx;
+
+	CAM_DBG(CAM_SYNC,
+		"row name:%s sync_id:%i [idx:%u] row_state:%u",
+		row->name, row->sync_id, idx, row->state);
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	if (row->state == CAM_SYNC_STATE_INVALID) {
+		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+		CAM_ERR(CAM_SYNC,
+			"Error: accessing an uninitialized sync obj: idx = %d",
+			idx);
+		return -EINVAL;
+	}
+
+	if (row->state == CAM_SYNC_STATE_ACTIVE)
+		CAM_DBG(CAM_SYNC,
+			"Destroying an active sync object name:%s id:%i",
+			row->name, row->sync_id);
+
+	row->state = CAM_SYNC_STATE_INVALID;
+
+	/* Object's child and parent objects will be added into this list */
+	INIT_LIST_HEAD(&temp_child_list);
+	INIT_LIST_HEAD(&temp_parent_list);
+
+	list_for_each_entry_safe(child_info, temp_child, &row->children_list,
+		list) {
+		if (child_info->sync_id <= 0)
+			continue;
+
+		list_del_init(&child_info->list);
+		list_add_tail(&child_info->list, &temp_child_list);
+	}
+
+	list_for_each_entry_safe(parent_info, temp_parent, &row->parents_list,
+		list) {
+		if (parent_info->sync_id <= 0)
+			continue;
+
+		list_del_init(&parent_info->list);
+		list_add_tail(&parent_info->list, &temp_parent_list);
+	}
+
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	/* Cleanup the child to parent link from child list */
+	while (!list_empty(&temp_child_list)) {
+		child_info = list_first_entry(&temp_child_list,
+			struct sync_child_info, list);
+		child_row = sync_dev->sync_table + child_info->sync_id;
+
+		spin_lock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
+
+		if (child_row->state == CAM_SYNC_STATE_INVALID) {
+			list_del_init(&child_info->list);
+			spin_unlock_bh(&sync_dev->row_spinlocks[
+				child_info->sync_id]);
+			kfree(child_info);
+			continue;
+		}
+
+		if (child_row->state == CAM_SYNC_STATE_ACTIVE)
+			CAM_DBG(CAM_SYNC,
+				"Warning: destroying active child sync obj = %d",
+				child_info->sync_id);
+
+		cam_sync_util_cleanup_parents_list(child_row,
+			SYNC_LIST_CLEAN_ONE, idx);
+
+		list_del_init(&child_info->list);
+		spin_unlock_bh(&sync_dev->row_spinlocks[child_info->sync_id]);
+		kfree(child_info);
+	}
+
+	/* Cleanup the parent to child link */
+	while (!list_empty(&temp_parent_list)) {
+		parent_info = list_first_entry(&temp_parent_list,
+			struct sync_parent_info, list);
+		parent_row = sync_dev->sync_table + parent_info->sync_id;
+
+		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+
+		if (parent_row->state == CAM_SYNC_STATE_INVALID) {
+			list_del_init(&parent_info->list);
+			spin_unlock_bh(&sync_dev->row_spinlocks[
+				parent_info->sync_id]);
+			kfree(parent_info);
+			continue;
+		}
+
+		if (parent_row->state == CAM_SYNC_STATE_ACTIVE)
+			CAM_DBG(CAM_SYNC,
+				"Warning: destroying active parent sync obj = %d",
+				parent_info->sync_id);
+
+		cam_sync_util_cleanup_children_list(parent_row,
+			SYNC_LIST_CLEAN_ONE, idx);
+
+		list_del_init(&parent_info->list);
+		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
+		kfree(parent_info);
+	}
+
+	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
+	list_for_each_entry_safe(upayload_info, temp_upayload,
+			&row->user_payload_list, list) {
+		list_del_init(&upayload_info->list);
+		kfree(upayload_info);
+	}
+
+	list_for_each_entry_safe(sync_cb, temp_cb,
+			&row->callback_list, list) {
+		list_del_init(&sync_cb->list);
+		kfree(sync_cb);
+	}
+
+	memset(row, 0, sizeof(*row));
+	clear_bit(idx, sync_dev->bitmap);
+	INIT_LIST_HEAD(&row->callback_list);
+	INIT_LIST_HEAD(&row->parents_list);
+	INIT_LIST_HEAD(&row->children_list);
+	INIT_LIST_HEAD(&row->user_payload_list);
+	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
+
+	CAM_DBG(CAM_SYNC, "Destroying sync obj:%d successful", idx);
+	return 0;
+}
+
+void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work)
+{
+	struct sync_callback_info *cb_info = container_of(cb_dispatch_work,
+		struct sync_callback_info,
+		cb_dispatch_work);
+
+	cb_info->callback_func(cb_info->sync_obj,
+		cb_info->status,
+		cb_info->cb_data);
+
+	kfree(cb_info);
+}
+
+void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
+	uint32_t status)
+{
+	struct sync_callback_info  *sync_cb;
+	struct sync_user_payload   *payload_info;
+	struct sync_callback_info  *temp_sync_cb;
+	struct sync_table_row      *signalable_row;
+	struct sync_user_payload   *temp_payload_info;
+
+	signalable_row = sync_dev->sync_table + sync_obj;
+	if (signalable_row->state == CAM_SYNC_STATE_INVALID) {
+		CAM_DBG(CAM_SYNC,
+			"Accessing invalid sync object:%i", sync_obj);
+		return;
+	}
+
+	/* Dispatch kernel callbacks if any were registered earlier */
+	list_for_each_entry_safe(sync_cb,
+		temp_sync_cb, &signalable_row->callback_list, list) {
+		sync_cb->status = status;
+		list_del_init(&sync_cb->list);
+		queue_work(sync_dev->work_queue,
+			&sync_cb->cb_dispatch_work);
+	}
+
+	/* Dispatch user payloads if any were registered earlier */
+	list_for_each_entry_safe(payload_info, temp_payload_info,
+		&signalable_row->user_payload_list, list) {
+		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
+		if (!sync_dev->cam_sync_eventq) {
+			spin_unlock_bh(
+				&sync_dev->cam_sync_eventq_lock);
+			break;
+		}
+		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
+		cam_sync_util_send_v4l2_event(
+			CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
+			sync_obj,
+			status,
+			payload_info->payload_data,
+			CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));
+
+		list_del_init(&payload_info->list);
+		/*
+		 * We can free the list node here because
+		 * sending V4L event will make a deep copy
+		 * anyway
+		 */
+		kfree(payload_info);
+	}
+
+	/*
+	 * This needs to be done because we want to unblock anyone
+	 * who might be blocked and waiting on this sync object
+	 */
+	complete_all(&signalable_row->signaled);
+}
+
+void cam_sync_util_send_v4l2_event(uint32_t id,
+	uint32_t sync_obj,
+	int status,
+	void *payload,
+	int len)
+{
+	struct v4l2_event event;
+	__u64 *payload_data = NULL;
+	struct cam_sync_ev_header *ev_header = NULL;
+
+	event.id = id;
+	event.type = CAM_SYNC_V4L_EVENT;
+
+	ev_header = CAM_SYNC_GET_HEADER_PTR(event);
+	ev_header->sync_obj = sync_obj;
+	ev_header->status = status;
+
+	payload_data = CAM_SYNC_GET_PAYLOAD_PTR(event, __u64);
+	memcpy(payload_data, payload, len);
+
+	v4l2_event_queue(sync_dev->vdev, &event);
+	CAM_DBG(CAM_SYNC, "send v4l2 event for sync_obj :%d",
+		sync_obj);
+}
+
+int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
+	int new_state)
+{
+	int rc = 0;
+
+	switch (parent_row->state) {
+	case CAM_SYNC_STATE_ACTIVE:
+	case CAM_SYNC_STATE_SIGNALED_SUCCESS:
+		parent_row->state = new_state;
+		break;
+
+	case CAM_SYNC_STATE_SIGNALED_ERROR:
+		break;
+
+	case CAM_SYNC_STATE_INVALID:
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
+	uint32_t list_clean_type, uint32_t sync_obj)
+{
+	struct sync_child_info *child_info = NULL;
+	struct sync_child_info *temp_child_info = NULL;
+	uint32_t                curr_sync_obj;
+
+	list_for_each_entry_safe(child_info,
+			temp_child_info, &row->children_list, list) {
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+			(child_info->sync_id != sync_obj))
+			continue;
+
+		curr_sync_obj = child_info->sync_id;
+		list_del_init(&child_info->list);
+		kfree(child_info);
+
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+			(curr_sync_obj == sync_obj))
+			break;
+	}
+}
+
+void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
+	uint32_t list_clean_type, uint32_t sync_obj)
+{
+	struct sync_parent_info *parent_info = NULL;
+	struct sync_parent_info *temp_parent_info = NULL;
+	uint32_t                 curr_sync_obj;
+
+	list_for_each_entry_safe(parent_info,
+			temp_parent_info, &row->parents_list, list) {
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+			(parent_info->sync_id != sync_obj))
+			continue;
+
+		curr_sync_obj = parent_info->sync_id;
+		list_del_init(&parent_info->list);
+		kfree(parent_info);
+
+		if ((list_clean_type == SYNC_LIST_CLEAN_ONE) &&
+			(curr_sync_obj == sync_obj))
+			break;
+	}
+}
diff --git a/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
new file mode 100644
index 0000000..c459aa2
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_sync/cam_sync_util.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __CAM_SYNC_UTIL_H__
+#define __CAM_SYNC_UTIL_H__
+
+
+#include <cam_sync_api.h>
+#include "cam_sync_private.h"
+#include "cam_debug_util.h"
+
+extern struct sync_device *sync_dev;
+
+/**
+ * @brief: Finds an empty row in the sync table and sets its corresponding bit
+ * in the bit array
+ *
+ * @param sync_dev : Pointer to the sync device instance
+ * @param idx      : Pointer to a long containing the index found in the bit
+ *                   array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_find_and_set_empty_row(struct sync_device *sync_dev,
+	long *idx);
+
+/**
+ * @brief: Function to initialize an empty row in the sync table. This should be
+ *         called only for individual sync objects.
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx   : Index of row to initialize
+ * @param name  : Optional string representation of the sync object. Should be
+ *                63 characters or less
+ * @param type  : type of row to be initialized
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_row(struct sync_table_row *table,
+	uint32_t idx, const char *name, uint32_t type);
+
+/**
+ * @brief: Function to uninitialize a row in the sync table
+ *
+ * @param table : Pointer to the sync objects table
+ * @param idx   : Index of row to uninitialize
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_deinit_object(struct sync_table_row *table, uint32_t idx);
+
+/**
+ * @brief: Function to initialize a row in the sync table when the object is a
+ *         group object, also known as a merged sync object
+ *
+ * @param table     : Pointer to the sync objects table
+ * @param idx       : Index of row to initialize
+ * @param sync_objs : Array of sync objects which will be merged
+ *                    or grouped together
+ * @param num_objs  : Number of sync objects in the array
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_init_group_object(struct sync_table_row *table,
+	uint32_t idx,
+	uint32_t *sync_objs,
+	uint32_t num_objs);
+
+/**
+ * @brief: Function to dispatch a kernel callback for a sync callback
+ *
+ * @param cb_dispatch_work : Pointer to the work_struct that needs to be
+ *                           dispatched
+ *
+ * @return None
+ */
+void cam_sync_util_cb_dispatch(struct work_struct *cb_dispatch_work);
+
+/**
+ * @brief: Function to dispatch callbacks for a signaled sync object
+ *
+ * @sync_obj : Sync object that is signaled
+ * @status   : Status of the signaled object
+ *
+ * @return None
+ */
+void cam_sync_util_dispatch_signaled_cb(int32_t sync_obj,
+	uint32_t status);
+
+/**
+ * @brief: Function to send V4L event to user space
+ * @param id       : V4L event id to send
+ * @param sync_obj : Sync obj for which event needs to be sent
+ * @param status   : Status of the event
+ * @payload        : Payload that needs to be sent to user space
+ * @len            : Length of the payload
+ *
+ * @return None
+ */
+void cam_sync_util_send_v4l2_event(uint32_t id,
+	uint32_t sync_obj,
+	int status,
+	void *payload,
+	int len);
+
+/**
+ * @brief: Function which updates the state of a parent sync object based on
+ *         its current state and the new state reported by a child
+ *
+ * @param parent_row : Row of the parent sync object to be updated
+ * @param new_state  : New state reported for the parent
+ *
+ * @return Status of operation. Negative in case of error. Zero otherwise.
+ */
+int cam_sync_util_update_parent_state(struct sync_table_row *parent_row,
+	int new_state);
+
+/**
+ * @brief: Function to clean up the children of a sync object
+ * @row                 : Row whose child list to clean
+ * @list_clean_type     : Clean specific object or clean all objects
+ * @sync_obj            : Sync object to be cleaned if list clean type is
+ *                          SYNC_LIST_CLEAN_ONE
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_children_list(struct sync_table_row *row,
+	uint32_t list_clean_type, uint32_t sync_obj);
+
+/**
+ * @brief: Function to clean up the parents of a sync object
+ * @row                 : Row whose parent list to clean
+ * @list_clean_type     : Clean specific object or clean all objects
+ * @sync_obj            : Sync object to be cleaned if list clean type is
+ *                          SYNC_LIST_CLEAN_ONE
+ *
+ * @return None
+ */
+void cam_sync_util_cleanup_parents_list(struct sync_table_row *row,
+	uint32_t list_clean_type, uint32_t sync_obj);
+
+#endif /* __CAM_SYNC_UTIL_H__ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/Makefile b/drivers/media/platform/msm/camera/cam_utils/Makefile
new file mode 100644
index 0000000..71cf743
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_core/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_req_mgr/
+ccflags-y += -Idrivers/media/platform/msm/camera/cam_smmu/
+
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_soc_util.o cam_io_util.o cam_packet_util.o cam_debug_util.o cam_trace.o cam_common_util.o
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
new file mode 100644
index 0000000..870cbf6
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "cam_common_util.h"
+#include "cam_debug_util.h"
+
+int cam_common_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index)
+{
+	int i;
+
+	for (i = 0; i < num_strings; i++) {
+		if (strnstr(strings[i], matching_string, strlen(strings[i]))) {
+			CAM_DBG(CAM_UTIL, "matched %s : %d\n",
+				matching_string, i);
+			*index = i;
+			return 0;
+		}
+	}
+
+	return -EINVAL;
+}
+
+uint32_t cam_common_util_remove_duplicate_arr(int32_t *arr, uint32_t num)
+{
+	int i, j;
+	uint32_t wr_idx = 1;
+
+	if (!arr) {
+		CAM_ERR(CAM_UTIL, "Null input array");
+		return 0;
+	}
+
+	for (i = 1; i < num; i++) {
+		for (j = 0; j < wr_idx ; j++) {
+			if (arr[i] == arr[j])
+				break;
+		for (j = 0; j < wr_idx; j++) {
+		if (j == wr_idx)
+			arr[wr_idx++] = arr[i];
+	}
+
+	return wr_idx;
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
new file mode 100644
index 0000000..57f67fb
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_common_util.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_COMMON_UTIL_H_
+#define _CAM_COMMON_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#define CAM_BITS_MASK_SHIFT(x, mask, shift) (((x) & (mask)) >> shift)
+
+#define PTR_TO_U64(ptr) ((uint64_t)(uintptr_t)ptr)
+#define U64_TO_PTR(ptr) ((void *)(uintptr_t)ptr)
+
+/**
+ * cam_common_util_get_string_index()
+ *
+ * @brief                  Match the string from list of strings to return
+ *                         matching index
+ *
+ * @strings:               Pointer to list of strings
+ * @num_strings:           Number of strings in 'strings'
+ * @matching_string:       String to match
+ * @index:                 Pointer to index to return matching index
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_common_util_get_string_index(const char **strings,
+	uint32_t num_strings, char *matching_string, uint32_t *index);
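+/*
+ * Usage sketch (illustrative; the string table below is hypothetical):
+ *
+ *	static const char *clk_names[] = {"cam_cc_ahb", "cam_cc_axi"};
+ *	uint32_t idx;
+ *	int rc;
+ *
+ *	rc = cam_common_util_get_string_index(clk_names, 2,
+ *		"cam_cc_axi", &idx);
+ *
+ * On success rc == 0 and idx == 1. Note the match is substring based
+ * (strnstr), not an exact string compare.
+ */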
+
+/**
+ * cam_common_util_remove_duplicate_arr()
+ *
+ * @brief                  Move all the unique integers to the start of
+ *                         the array and return the number of unique integers
+ *
+ * @array:                 Pointer to the first integer of array
+ * @num:                   Number of elements in array
+ *
+ * @return:                Number of unique integers in array
+ */
+uint32_t cam_common_util_remove_duplicate_arr(int32_t *array,
+	uint32_t num);
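+/*
+ * Usage sketch (illustrative values):
+ *
+ *	int32_t arr[] = {3, 1, 3, 2, 1};
+ *	uint32_t n;
+ *
+ *	n = cam_common_util_remove_duplicate_arr(arr, 5);
+ *
+ * Here n == 3 and arr now begins with {3, 1, 2}; elements beyond index
+ * n - 1 are left unspecified.
+ */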
+
+#endif /* _CAM_COMMON_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
new file mode 100644
index 0000000..968e95e
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+
+#include "cam_debug_util.h"
+
+static uint debug_mdl;
+module_param(debug_mdl, uint, 0644);
+
+const char *cam_get_module_name(unsigned int module_id)
+{
+	const char *name = NULL;
+
+	switch (module_id) {
+	case CAM_CDM:
+		name = "CAM-CDM";
+		break;
+	case CAM_CORE:
+		name = "CAM-CORE";
+		break;
+	case CAM_CRM:
+		name = "CAM-CRM";
+		break;
+	case CAM_CPAS:
+		name = "CAM-CPAS";
+		break;
+	case CAM_ISP:
+		name = "CAM-ISP";
+		break;
+	case CAM_SENSOR:
+		name = "CAM-SENSOR";
+		break;
+	case CAM_SMMU:
+		name = "CAM-SMMU";
+		break;
+	case CAM_SYNC:
+		name = "CAM-SYNC";
+		break;
+	case CAM_ICP:
+		name = "CAM-ICP";
+		break;
+	case CAM_JPEG:
+		name = "CAM-JPEG";
+		break;
+	case CAM_FD:
+		name = "CAM-FD";
+		break;
+	case CAM_LRME:
+		name = "CAM-LRME";
+		break;
+	case CAM_FLASH:
+		name = "CAM-FLASH";
+		break;
+	case CAM_ACTUATOR:
+		name = "CAM-ACTUATOR";
+		break;
+	case CAM_CCI:
+		name = "CAM-CCI";
+		break;
+	case CAM_CSIPHY:
+		name = "CAM-CSIPHY";
+		break;
+	case CAM_EEPROM:
+		name = "CAM-EEPROM";
+		break;
+	case CAM_UTIL:
+		name = "CAM-UTIL";
+		break;
+	case CAM_CTXT:
+		name = "CAM-CTXT";
+		break;
+	case CAM_HFI:
+		name = "CAM-HFI";
+		break;
+	case CAM_OIS:
+		name = "CAM-OIS";
+		break;
+	case CAM_IRQ_CTRL:
+		name = "CAM-IRQ-CTRL";
+		break;
+	case CAM_MEM:
+		name = "CAM-MEM";
+		break;
+	case CAM_PERF:
+		name = "CAM-PERF";
+		break;
+	case CAM_REQ:
+		name = "CAM-REQ";
+		break;
+	default:
+		name = "CAM";
+		break;
+	}
+
+	return name;
+}
+
+void cam_debug_log(unsigned int module_id, const char *func, const int line,
+	const char *fmt, ...)
+{
+	char str_buffer[STR_BUFFER_MAX_LENGTH];
+	va_list args;
+
+	if (!(debug_mdl & module_id))
+		return;
+
+	va_start(args, fmt);
+	vsnprintf(str_buffer, STR_BUFFER_MAX_LENGTH, fmt, args);
+	va_end(args);
+
+	pr_info("CAM_DBG: %s: %s: %d: %s\n",
+		cam_get_module_name(module_id),
+		func, line, str_buffer);
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
new file mode 100644
index 0000000..b488cfe
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_debug_util.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_DEBUG_UTIL_H_
+#define _CAM_DEBUG_UTIL_H_
+
+#define CAM_CDM        (1 << 0)
+#define CAM_CORE       (1 << 1)
+#define CAM_CPAS       (1 << 2)
+#define CAM_ISP        (1 << 3)
+#define CAM_CRM        (1 << 4)
+#define CAM_SENSOR     (1 << 5)
+#define CAM_SMMU       (1 << 6)
+#define CAM_SYNC       (1 << 7)
+#define CAM_ICP        (1 << 8)
+#define CAM_JPEG       (1 << 9)
+#define CAM_FD         (1 << 10)
+#define CAM_LRME       (1 << 11)
+#define CAM_FLASH      (1 << 12)
+#define CAM_ACTUATOR   (1 << 13)
+#define CAM_CCI        (1 << 14)
+#define CAM_CSIPHY     (1 << 15)
+#define CAM_EEPROM     (1 << 16)
+#define CAM_UTIL       (1 << 17)
+#define CAM_HFI        (1 << 18)
+#define CAM_CTXT       (1 << 19)
+#define CAM_OIS        (1 << 20)
+#define CAM_RES        (1 << 21)
+#define CAM_MEM        (1 << 22)
+
+/* CAM_IRQ_CTRL: For events in irq controller */
+#define CAM_IRQ_CTRL   (1 << 23)
+
+/* CAM_REQ: Tracks a request submitted to KMD */
+#define CAM_REQ        (1 << 24)
+
+/* CAM_PERF: Used for performance (clock, BW etc) logs */
+#define CAM_PERF       (1 << 25)
+
+#define STR_BUFFER_MAX_LENGTH  1024
+
+/*
+ *  cam_debug_log()
+ *
+ * @brief     :  Get the Module name from module ID and print
+ *               respective debug logs
+ *
+ * @module_id :  Module ID of the caller
+ * @func      :  Name of the calling function
+ * @line      :  Line number of the call site
+ * @fmt       :  Format string to be printed in the log
+ *
+ */
+void cam_debug_log(unsigned int module_id, const char *func, const int line,
+	const char *fmt, ...);
+
+/*
+ * cam_get_module_name()
+ *
+ * @brief     :  Get the module name from module ID
+ *
+ * @module_id :  Module ID which is using this function
+ */
+const char *cam_get_module_name(unsigned int module_id);
+
+/*
+ * CAM_ERR
+ * @brief    :  This Macro will print error logs
+ *
+ * @__module :  Module ID of the caller
+ * @fmt      :  Format string to be printed in the log
+ * @args     :  Arguments to be printed in the log
+ */
+#define CAM_ERR(__module, fmt, args...)                            \
+	pr_err("CAM_ERR: %s: %s: %d " fmt "\n",                     \
+		cam_get_module_name(__module), __func__,  __LINE__, ##args)
+/*
+ * CAM_WARN
+ * @brief    :  This Macro will print warning logs
+ *
+ * @__module :  Module ID of the caller
+ * @fmt      :  Format string to be printed in the log
+ * @args     :  Arguments to be printed in the log
+ */
+#define CAM_WARN(__module, fmt, args...)                           \
+	pr_warn("CAM_WARN: %s: %s: %d " fmt "\n",                     \
+		cam_get_module_name(__module), __func__,  __LINE__, ##args)
+/*
+ * CAM_INFO
+ * @brief    :  This Macro will print Information logs
+ *
+ * @__module :  Module ID of the caller
+ * @fmt      :  Format string to be printed in the log
+ * @args     :  Arguments to be printed in the log
+ */
+#define CAM_INFO(__module, fmt, args...)                           \
+	pr_info("CAM_INFO: %s: %s: %d " fmt "\n",                     \
+		cam_get_module_name(__module), __func__,  __LINE__, ##args)
+
+/*
+ * CAM_INFO_RATE_LIMIT
+ * @brief    :  This Macro will print info logs with ratelimit
+ *
+ * @__module :  Module ID of the caller
+ * @fmt      :  Format string to be printed in the log
+ * @args     :  Arguments to be printed in the log
+ */
+#define CAM_INFO_RATE_LIMIT(__module, fmt, args...)                 \
+	pr_err_ratelimited("CAM_INFO: %s: %s: %d " fmt "\n",            \
+		cam_get_module_name(__module), __func__,  __LINE__, ##args)
+
+/*
+ * CAM_DBG
+ * @brief    :  This Macro will print debug logs when the module's bit is
+ *              set in the debug_mdl module parameter
+ *
+ * @__module :  Module ID of the caller
+ * @fmt      :  Format string to be printed in the log
+ * @args     :  Arguments to be printed in the log
+ */
+#define CAM_DBG(__module, fmt, args...)                            \
+	cam_debug_log(__module, __func__, __LINE__, fmt, ##args)
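+/*
+ * Example (illustrative): CAM_DBG output is gated by the debug_mdl module
+ * parameter, a bitmask of the module IDs defined above. To enable CAM_ISP
+ * (bit 3) and CAM_SYNC (bit 7) debug logs at runtime:
+ *
+ *	echo 0x88 > /sys/module/cam_debug_util/parameters/debug_mdl
+ *
+ * The exact module path under /sys/module depends on how this file is
+ * linked into the final kernel object.
+ */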
+
+/*
+ * CAM_ERR_RATE_LIMIT
+ * @brief    :  This Macro will print error logs with rate limiting
+ */
+#define CAM_ERR_RATE_LIMIT(__module, fmt, args...)                 \
+	pr_err_ratelimited("CAM_ERR: %s: %s: %d " fmt "\n",            \
+		cam_get_module_name(__module), __func__,  __LINE__, ##args)
+
+#endif /* _CAM_DEBUG_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
new file mode 100644
index 0000000..b6fa1ac
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2011-2014, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+
+int cam_io_w(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+	writel_relaxed_no_log(data, addr);
+
+	return 0;
+}
+
+int cam_io_w_mb(uint32_t data, void __iomem *addr)
+{
+	if (!addr)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+	/* Ensure previous writes are done */
+	wmb();
+	writel_relaxed_no_log(data, addr);
+	/* Ensure previous writes are done */
+	wmb();
+
+	return 0;
+}
+
+uint32_t cam_io_r(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		CAM_ERR(CAM_UTIL, "Invalid args");
+		return 0;
+	}
+
+	data = readl_relaxed(addr);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+
+	return data;
+}
+
+uint32_t cam_io_r_mb(void __iomem *addr)
+{
+	uint32_t data;
+
+	if (!addr) {
+		CAM_ERR(CAM_UTIL, "Invalid args");
+		return 0;
+	}
+
+	/* Ensure previous read is done */
+	rmb();
+	data = readl_relaxed(addr);
+	CAM_DBG(CAM_UTIL, "0x%pK %08x", addr, data);
+	/* Ensure previous read is done */
+	rmb();
+
+	return data;
+}
+
+int cam_io_memcpy(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
+
+	for (i = 0; i < len/4; i++) {
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+
+	return 0;
+}
+
+int  cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len)
+{
+	int i;
+	uint32_t *d = (uint32_t *) dest_addr;
+	uint32_t *s = (uint32_t *) src_addr;
+
+	if (!dest_addr || !src_addr)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "%pK %pK %d", dest_addr, src_addr, len);
+
+	/*
+	 * Do not use cam_io_w_mb to avoid double wmb() after a write
+	 * and before the next write.
+	 */
+	wmb();
+	for (i = 0; i < (len / 4); i++) {
+		CAM_DBG(CAM_UTIL, "0x%pK %08x", d, *s);
+		writel_relaxed(*s++, d++);
+	}
+	/* Ensure previous writes are done */
+	wmb();
+
+	return 0;
+}
+
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while ((tmp != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	if (cnt > retry) {
+		CAM_DBG(CAM_UTIL, "Poll failed by value");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs)
+{
+	uint32_t tmp, cnt = 0;
+	int rc = 0;
+
+	if (!addr)
+		return -EINVAL;
+
+	tmp = readl_relaxed(addr);
+	while (((tmp & bmask) != wait_data) && (cnt++ < retry)) {
+		if (min_usecs > 0 && max_usecs > 0)
+			usleep_range(min_usecs, max_usecs);
+		tmp = readl_relaxed(addr);
+	}
+
+	if (cnt > retry) {
+		CAM_DBG(CAM_UTIL, "Poll failed with mask");
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
+			i, len, data[i], addr);
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr =%pK",
+			i, len, data[i], addr);
+		/* Ensure previous writes are done */
+		wmb();
+		writel_relaxed(data[i], addr);
+	}
+
+	return 0;
+}
+
+#define __OFFSET(__i)   (data[__i][0])
+#define __VAL(__i)      (data[__i][1])
+int cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	for (i = 0; i < len; i++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+int cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len)
+{
+	int i;
+
+	if (!data || !len || !addr_base)
+		return -EINVAL;
+
+	/* Ensure write is done */
+	wmb();
+	for (i = 0; i < len; i++) {
+		CAM_DBG(CAM_UTIL, "i= %d len =%d val=%x addr_base =%pK reg=%x",
+			i, len, __VAL(i), addr_base, __OFFSET(i));
+		writel_relaxed(__VAL(i), addr_base + __OFFSET(i));
+	}
+
+	return 0;
+}
+
+#define BYTES_PER_REGISTER           4
+#define NUM_REGISTER_PER_LINE        4
+#define REG_OFFSET(__start, __i)    (__start + (__i * BYTES_PER_REGISTER))
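+/*
+ * Dump 'size' registers starting at base_addr + start_offset, logging
+ * NUM_REGISTER_PER_LINE values per line, e.g. (illustrative values):
+ *
+ *	0x00000100: 00000001 00000002 00000003 00000004
+ */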
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size)
+{
+	char          line_str[128];
+	char         *p_str;
+	int           i;
+	uint32_t      data;
+
+	CAM_DBG(CAM_UTIL, "addr=%pK offset=0x%x size=%d",
+		base_addr, start_offset, size);
+
+	if (!base_addr || (size <= 0))
+		return -EINVAL;
+
+	line_str[0] = '\0';
+	p_str = line_str;
+	for (i = 0; i < size; i++) {
+		if (i % NUM_REGISTER_PER_LINE == 0) {
+			/* "0x%08x: " is 12 chars, so allow for the NUL */
+			snprintf(p_str, 13, "0x%08x: ",
+				REG_OFFSET(start_offset, i));
+			p_str += 12;
+		}
+		data = readl_relaxed(base_addr + REG_OFFSET(start_offset, i));
+		/* "%08x " is 9 chars, so allow for the NUL */
+		snprintf(p_str, 10, "%08x ", data);
+		p_str += 9;
+		if ((i + 1) % NUM_REGISTER_PER_LINE == 0) {
+			CAM_ERR(CAM_UTIL, "%s", line_str);
+			line_str[0] = '\0';
+			p_str = line_str;
+		}
+	}
+	if (line_str[0] != '\0')
+		CAM_ERR(CAM_UTIL, "%s", line_str);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
new file mode 100644
index 0000000..615b51c
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_io_util.h
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2011-2014, 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_IO_UTIL_H_
+#define _CAM_IO_UTIL_H_
+
+#include <linux/types.h>
+
+/**
+ * cam_io_w()
+ *
+ * @brief:              Camera IO util for register write
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_w_mb()
+ *
+ * @brief:              Camera IO util for register write with memory barrier.
+ *                      Memory barriers are placed before and after the write
+ *                      to ensure ordering against surrounding accesses.
+ *
+ * @data:               Value to be written
+ * @addr:               Address used to write the value
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb(uint32_t data, void __iomem *addr);
+
+/**
+ * cam_io_r()
+ *
+ * @brief:              Camera IO util for register read
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r(void __iomem *addr);
+
+/**
+ * cam_io_r_mb()
+ *
+ * @brief:              Camera IO util for register read with memory barrier.
+ *                      Memory barriers are placed before and after the read
+ *                      to ensure ordering against surrounding accesses.
+ *
+ * @addr:               Address of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+uint32_t cam_io_r_mb(void __iomem *addr);
+
+/**
+ * cam_io_memcpy()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy(void __iomem *dest_addr,
+		void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_memcpy_mb()
+ *
+ * @brief:              Camera IO util for memory to register copy
+ *                      with barrier.
+ *                      Memory barriers are placed before and after the copy
+ *                      to ensure ordering against surrounding accesses.
+ *
+ * @dest_addr:          Destination register address
+ * @src_addr:           Source register address
+ * @len:                Range to be copied
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_memcpy_mb(void __iomem *dest_addr,
+	void __iomem *src_addr, uint32_t len);
+
+/**
+ * cam_io_poll_value_wmask()
+ *
+ * @brief:              Poll register value with bitmask.
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until @bmask read from @addr matches this data
+ * @bmask:              Bit mask
+ * @retry:              Number of retries
+ * @min_usecs:          Minimum delay between retries, in microseconds
+ * @max_usecs:          Maximum delay between retries, in microseconds
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value_wmask(void __iomem *addr, uint32_t wait_data,
+	uint32_t bmask, uint32_t retry, unsigned long min_usecs,
+	unsigned long max_usecs);
+
+/**
+ * cam_io_poll_value()
+ *
+ * @brief:              Poll register value
+ *
+ * @addr:               Register address to be polled
+ * @wait_data:          Wait until value read from @addr matches this data
+ * @retry:              Number of retries
+ * @min_usecs:          Minimum delay between retries, in microseconds
+ * @max_usecs:          Maximum delay between retries, in microseconds
+ *
+ * @return:             Success or Failure
+ *
+ * This function can sleep so it should not be called from interrupt
+ * handler, spin_lock etc.
+ */
+int cam_io_poll_value(void __iomem *addr, uint32_t wait_data, uint32_t retry,
+	unsigned long min_usecs, unsigned long max_usecs);
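+
+/*
+ * Illustrative usage sketch, assuming a hypothetical status register
+ * that reads 0x1 once the hardware is idle: retry up to 10 times,
+ * waiting 1000-2000 us between reads. Must be called from a context
+ * that may sleep.
+ *
+ *	rc = cam_io_poll_value(mem_base + HYP_REG_STATUS, 0x1, 10,
+ *		1000, 2000);
+ *	if (rc)
+ *		CAM_ERR(CAM_UTIL, "HW did not go idle");
+ */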
+
+/**
+ * cam_io_w_same_offset_block()
+ *
+ * @brief:              Write a block of data to the same address
+ *
+ * @data:               Block of data to be written
+ * @addr:               Register address to be written to
+ * @len:                Number of 32-bit words to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
+
+/**
+ * cam_io_w_mb_same_offset_block()
+ *
+ * @brief:              Write a block of data to the same address with
+ *                      barrier. The barrier is issued only before the
+ *                      writes, to enforce ordering. If the caller also
+ *                      needs the writes flushed, it must call wmb()
+ *                      separately.
+ *
+ * @data:               Block of data to be written
+ * @addr:               Register address to be written to
+ * @len:                Number of 32-bit words to be written
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_w_mb_same_offset_block(const uint32_t *data, void __iomem *addr,
+	uint32_t len);
+
+/**
+ * cam_io_w_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which each offset is added to
+ *                      form the register address
+ * @len:                Length of the offset-value pair array, in number
+ *                      of uint32_t
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
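+
+/*
+ * Illustrative usage sketch with a hypothetical offset-value table.
+ * @len counts uint32_t elements, so a table of N pairs is written
+ * with len = N * 2.
+ *
+ *	static const uint32_t hyp_init_regs[][2] = {
+ *		{0x0010, 0x1},
+ *		{0x0014, 0xff},
+ *		{0x0020, 0x0},
+ *	};
+ *
+ *	rc = cam_io_w_offset_val_block(hyp_init_regs, mem_base,
+ *		ARRAY_SIZE(hyp_init_regs) * 2);
+ */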
+
+/**
+ * cam_io_w_mb_offset_val_block()
+ *
+ * @brief:              This API is to write a block of registers
+ *                      represented by a 2 dimensional array table with
+ *                      register offset and value pair with memory barrier.
+ *                      The barrier is issued only before the writes, to
+ *                      enforce ordering. If the caller also needs the
+ *                      writes flushed, it must call wmb() separately.
+ *                      The offsets must all be distinct because of the
+ *                      way the barrier is used here.
+ *
+ *  offset0, value0,
+ *  offset1, value1,
+ *  offset2, value2,
+ *  and so on...
+ *
+ * @data:               Pointer to 2-dimensional offset-value array
+ * @addr_base:          Base address to which each offset is added to
+ *                      form the register address
+ * @len:                Length of the offset-value pair array, in number
+ *                      of uint32_t
+ *
+ * @return:             Success or Failure
+ *
+ */
+int32_t cam_io_w_mb_offset_val_block(const uint32_t data[][2],
+	void __iomem *addr_base, uint32_t len);
+
+/**
+ * cam_io_dump()
+ *
+ * @brief:              Camera IO util for dumping a range of registers
+ *
+ * @base_addr:          Base register address for the dump
+ * @start_offset:       Start register offset for the dump
+ * @size:               Size specifying the range to be dumped
+ *
+ * @return:             Success or Failure
+ */
+int cam_io_dump(void __iomem *base_addr, uint32_t start_offset, int size);
+
+#endif /* _CAM_IO_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
new file mode 100644
index 0000000..31dd486
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+
+#include "cam_mem_mgr.h"
+#include "cam_packet_util.h"
+#include "cam_debug_util.h"
+
+int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+	size_t *len)
+{
+	int rc = 0;
+	uintptr_t kmd_buf_addr = 0;
+
+	rc = cam_mem_get_cpu_buf(handle, &kmd_buf_addr, len);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Unable to get the virtual address %d", rc);
+	} else {
+		if (kmd_buf_addr && *len) {
+			*buf_addr = (uint32_t *)kmd_buf_addr;
+		} else {
+			CAM_ERR(CAM_UTIL, "Invalid addr and length :%zd", *len);
+			rc = -ENOMEM;
+		}
+	}
+	return rc;
+}
+
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc)
+{
+	if ((cmd_desc->length > cmd_desc->size) ||
+		(cmd_desc->mem_handle <= 0)) {
+		CAM_ERR(CAM_UTIL, "invalid cmd arg %d %d %d %d",
+			cmd_desc->offset, cmd_desc->length,
+			cmd_desc->mem_handle, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_packet_util_validate_packet(struct cam_packet *packet)
+{
+	if (!packet)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "num cmd buf:%d num of io config:%d kmd buf index:%d",
+		packet->num_cmd_buf, packet->num_io_configs,
+		packet->kmd_cmd_buf_index);
+
+	if ((packet->kmd_cmd_buf_index >= packet->num_cmd_buf) ||
+		(!packet->header.size) ||
+		(packet->cmd_buf_offset > packet->header.size) ||
+		(packet->io_configs_offset > packet->header.size))  {
+		CAM_ERR(CAM_UTIL, "invalid packet:%d %d %d %d %d",
+			packet->kmd_cmd_buf_index,
+			packet->num_cmd_buf, packet->cmd_buf_offset,
+			packet->io_configs_offset, packet->header.size);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_kmd_buf_info *kmd_buf)
+{
+	int                      rc = 0;
+	size_t                   len = 0;
+	struct cam_cmd_buf_desc *cmd_desc;
+	uint32_t                *cpu_addr;
+
+	if (!packet || !kmd_buf) {
+		CAM_ERR(CAM_UTIL, "Invalid arg %pK %pK", packet, kmd_buf);
+		return -EINVAL;
+	}
+
+	/* Take the first command descriptor and add the KMD offset to it */
+	cmd_desc = (struct cam_cmd_buf_desc *) ((uint8_t *)
+		&packet->payload + packet->cmd_buf_offset);
+	cmd_desc += packet->kmd_cmd_buf_index;
+
+	rc = cam_packet_util_validate_cmd_desc(cmd_desc);
+	if (rc)
+		return rc;
+
+	rc = cam_packet_util_get_cmd_mem_addr(cmd_desc->mem_handle, &cpu_addr,
+		&len);
+	if (rc)
+		return rc;
+
+	if (len < cmd_desc->size) {
+		CAM_ERR(CAM_UTIL, "invalid memory len:%zd and cmd desc size:%d",
+			len, cmd_desc->size);
+		return -EINVAL;
+	}
+
+	cpu_addr += (cmd_desc->offset / 4) + (packet->kmd_cmd_buf_offset / 4);
+	CAM_DBG(CAM_UTIL, "total size %d, cmd size: %d, KMD buffer size: %d",
+		cmd_desc->size, cmd_desc->length,
+		cmd_desc->size - cmd_desc->length);
+	CAM_DBG(CAM_UTIL, "hdl 0x%x, cmd offset %d, kmd offset %d, addr 0x%pK",
+		cmd_desc->mem_handle, cmd_desc->offset,
+		packet->kmd_cmd_buf_offset, cpu_addr);
+
+	kmd_buf->cpu_addr   = cpu_addr;
+	kmd_buf->handle     = cmd_desc->mem_handle;
+	kmd_buf->offset     = cmd_desc->offset + packet->kmd_cmd_buf_offset;
+	kmd_buf->size       = cmd_desc->size - cmd_desc->length;
+	kmd_buf->used_bytes = 0;
+
+	return rc;
+}
+
+int cam_packet_util_process_patches(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl)
+{
+	struct cam_patch_desc *patch_desc = NULL;
+	dma_addr_t iova_addr;
+	uintptr_t  cpu_addr;
+	uint32_t   temp;
+	uint32_t  *dst_cpu_addr;
+	uint32_t  *src_buf_iova_addr;
+	size_t     dst_buf_len;
+	size_t     src_buf_size;
+	int        i;
+	int        rc = 0;
+	int32_t    hdl;
+
+	/* process patch descriptor */
+	patch_desc = (struct cam_patch_desc *)
+			((uint32_t *) &packet->payload +
+			packet->patch_offset/4);
+	CAM_DBG(CAM_UTIL, "packet = %pK patch_desc = %pK size = %lu",
+			(void *)packet, (void *)patch_desc,
+			sizeof(struct cam_patch_desc));
+
+	for (i = 0; i < packet->num_patches; i++) {
+
+		hdl = cam_mem_is_secure_buf(patch_desc[i].src_buf_hdl) ?
+			sec_mmu_hdl : iommu_hdl;
+		rc = cam_mem_get_io_buf(patch_desc[i].src_buf_hdl,
+			hdl, &iova_addr, &src_buf_size);
+		if (rc < 0) {
+			CAM_ERR(CAM_UTIL, "unable to get src buf address");
+			return rc;
+		}
+		src_buf_iova_addr = (uint32_t *)iova_addr;
+		temp = iova_addr;
+
+		rc = cam_mem_get_cpu_buf(patch_desc[i].dst_buf_hdl,
+			&cpu_addr, &dst_buf_len);
+		if (rc < 0) {
+			CAM_ERR(CAM_UTIL, "unable to get dst buf address");
+			return rc;
+		}
+		dst_cpu_addr = (uint32_t *)cpu_addr;
+
+		CAM_DBG(CAM_UTIL, "i = %d patch info = %x %x %x %x", i,
+			patch_desc[i].dst_buf_hdl, patch_desc[i].dst_offset,
+			patch_desc[i].src_buf_hdl, patch_desc[i].src_offset);
+
+		dst_cpu_addr = (uint32_t *)((uint8_t *)dst_cpu_addr +
+			patch_desc[i].dst_offset);
+		temp += patch_desc[i].src_offset;
+
+		*dst_cpu_addr = temp;
+
+		CAM_DBG(CAM_UTIL,
+			"patch is done for dst %pK with src %pK value %llx",
+			dst_cpu_addr, src_buf_iova_addr,
+			*((uint64_t *)dst_cpu_addr));
+	}
+
+	return rc;
+}
+
+int cam_packet_util_process_generic_cmd_buffer(
+	struct cam_cmd_buf_desc *cmd_buf,
+	cam_packet_generic_blob_handler blob_handler_cb, void *user_data)
+{
+	int       rc;
+	uintptr_t  cpu_addr;
+	size_t    buf_size;
+	uint32_t *blob_ptr;
+	uint32_t  blob_type, blob_size, blob_block_size, len_read;
+
+	if (!cmd_buf || !blob_handler_cb) {
+		CAM_ERR(CAM_UTIL, "Invalid args %pK %pK",
+			cmd_buf, blob_handler_cb);
+		return -EINVAL;
+	}
+
+	if (!cmd_buf->length || !cmd_buf->size) {
+		CAM_ERR(CAM_UTIL, "Invalid cmd buf size %d %d",
+			cmd_buf->length, cmd_buf->size);
+		return -EINVAL;
+	}
+
+	rc = cam_mem_get_cpu_buf(cmd_buf->mem_handle, &cpu_addr, &buf_size);
+	if (rc || !cpu_addr || (buf_size == 0)) {
+		CAM_ERR(CAM_UTIL, "Failed in Get cpu addr, rc=%d, cpu_addr=%pK",
+			rc, (void *)cpu_addr);
+		return rc ? rc : -EINVAL;
+	}
+
+	blob_ptr = (uint32_t *)(((uint8_t *)cpu_addr) +
+		cmd_buf->offset);
+
+	CAM_DBG(CAM_UTIL,
+		"GenericCmdBuffer cpuaddr=%pK, blobptr=%pK, len=%d",
+		(void *)cpu_addr, (void *)blob_ptr, cmd_buf->length);
+
+	len_read = 0;
+	while (len_read < cmd_buf->length) {
+		blob_type =
+			((*blob_ptr) & CAM_GENERIC_BLOB_CMDBUFFER_TYPE_MASK) >>
+			CAM_GENERIC_BLOB_CMDBUFFER_TYPE_SHIFT;
+		blob_size =
+			((*blob_ptr) & CAM_GENERIC_BLOB_CMDBUFFER_SIZE_MASK) >>
+			CAM_GENERIC_BLOB_CMDBUFFER_SIZE_SHIFT;
+
+		blob_block_size = sizeof(uint32_t) +
+			(((blob_size + sizeof(uint32_t) - 1) /
+			sizeof(uint32_t)) * sizeof(uint32_t));
+
+		CAM_DBG(CAM_UTIL,
+			"Blob type=%d size=%d block_size=%d len_read=%d total=%d",
+			blob_type, blob_size, blob_block_size, len_read,
+			cmd_buf->length);
+
+		if (len_read + blob_block_size > cmd_buf->length) {
+			CAM_ERR(CAM_UTIL, "Invalid Blob %d %d %d %d",
+				blob_type, blob_size, len_read,
+				cmd_buf->length);
+			return -EINVAL;
+		}
+
+		len_read += blob_block_size;
+
+		rc = blob_handler_cb(user_data, blob_type, blob_size,
+			(uint8_t *)(blob_ptr + 1));
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Error in handling blob type %d %d",
+				blob_type, blob_size);
+			return rc;
+		}
+
+		blob_ptr += (blob_block_size / sizeof(uint32_t));
+	}
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
new file mode 100644
index 0000000..a2501b9
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_packet_util.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_PACKET_UTIL_H_
+#define _CAM_PACKET_UTIL_H_
+
+#include <uapi/media/cam_defs.h>
+
+/**
+ * @brief                  KMD scratch buffer information
+ *
+ * @handle:                Memory handle
+ * @cpu_addr:              Cpu address
+ * @offset:                Offset from the start of the buffer
+ * @size:                  Size of the buffer
+ * @used_bytes:            Used memory in bytes
+ *
+ */
+struct cam_kmd_buf_info {
+	int        handle;
+	uint32_t  *cpu_addr;
+	uint32_t   offset;
+	uint32_t   size;
+	uint32_t   used_bytes;
+};
+
+/* Generic Cmd Buffer blob callback function type */
+typedef int (*cam_packet_generic_blob_handler)(void *user_data,
+	uint32_t blob_type, uint32_t blob_size, uint8_t *blob_data);
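+
+/*
+ * A minimal handler sketch, assuming hypothetical blob types. Each blob
+ * in a generic command buffer is one 32-bit header word (type and size
+ * packed per the CAM_GENERIC_BLOB_CMDBUFFER_* masks) followed by the
+ * payload, padded to a 4-byte boundary; the callback receives the
+ * unpacked type, size and payload pointer.
+ *
+ *	static int hyp_blob_handler(void *user_data, uint32_t blob_type,
+ *		uint32_t blob_size, uint8_t *blob_data)
+ *	{
+ *		switch (blob_type) {
+ *		case HYP_BLOB_TYPE_CLOCK_CONFIG:
+ *			return hyp_handle_clock_config(user_data,
+ *				blob_data, blob_size);
+ *		default:
+ *			return -EINVAL;
+ *		}
+ *	}
+ */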
+
+/**
+ * cam_packet_util_get_cmd_mem_addr()
+ *
+ * @brief                  Get command buffer address
+ *
+ * @handle:                Command buffer memory handle
+ * @buf_addr:              Command buffer cpu mapped address
+ * @len:                   Command buffer length
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_packet_util_get_cmd_mem_addr(int handle, uint32_t **buf_addr,
+	size_t *len);
+
+/**
+ * cam_packet_util_validate_packet()
+ *
+ * @brief                  Validate the packet
+ *
+ * @packet:                Packet to be validated
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_packet_util_validate_packet(struct cam_packet *packet);
+
+/**
+ * cam_packet_util_validate_cmd_desc()
+ *
+ * @brief                  Validate the packet
+ *
+ * @cmd_desc:              Command descriptor to be validated
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_packet_util_validate_cmd_desc(struct cam_cmd_buf_desc *cmd_desc);
+
+/**
+ * cam_packet_util_get_kmd_buffer()
+ *
+ * @brief                  Get the kmd buffer from the packet command descriptor
+ *
+ * @packet:                Packet data
+ * @kmd_buf:               Filled with the extracted KMD buffer information
+ *
+ * @return:                0 for success
+ *                         -EINVAL for Fail
+ */
+int cam_packet_util_get_kmd_buffer(struct cam_packet *packet,
+	struct cam_kmd_buf_info *kmd_buf);
+
+/**
+ * cam_packet_util_process_patches()
+ *
+ * @brief:              Replace the memory handles in the packet with
+ *                      device (IOVA) addresses, using the information
+ *                      from the patches.
+ *
+ * @packet:             Input packet containing Command Buffers and Patches
+ * @iommu_hdl:          IOMMU handle of the HW Device that received the packet
+ * @sec_mmu_hdl:        Secure IOMMU handle of the HW Device that
+ *                      received the packet
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_packet_util_process_patches(struct cam_packet *packet,
+	int32_t iommu_hdl, int32_t sec_mmu_hdl);
+
+/**
+ * cam_packet_util_process_generic_cmd_buffer()
+ *
+ * @brief:              Process a Generic Blob command buffer. This utility
+ *                      function parses the command buffer and calls the
+ *                      blob_handler_cb callback for each blob that exists
+ *                      in the command buffer.
+ *
+ * @cmd_buf:            Generic Blob Cmd Buffer handle
+ * @blob_handler_cb:    Callback pointer to call for each blob found in the
+ *                      command buffer
+ * @user_data:          User data to be passed to the callback
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_packet_util_process_generic_cmd_buffer(
+	struct cam_cmd_buf_desc *cmd_buf,
+	cam_packet_generic_blob_handler blob_handler_cb, void *user_data);
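+
+/*
+ * Illustrative invocation sketch, pairing the declaration above with
+ * the hypothetical handler described earlier: walk every blob in a
+ * generic command buffer descriptor and dispatch each one.
+ *
+ *	rc = cam_packet_util_process_generic_cmd_buffer(cmd_desc,
+ *		hyp_blob_handler, ctx);
+ */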
+
+#endif /* _CAM_PACKET_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
new file mode 100644
index 0000000..4127d37
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.c
@@ -0,0 +1,1640 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include "cam_soc_util.h"
+#include "cam_debug_util.h"
+
+static char supported_clk_info[256];
+static char debugfs_dir_name[64];
+
+/**
+ * cam_soc_util_get_string_from_level()
+ *
+ * @brief:     Returns the string for a given clk level
+ *
+ * @level:     Clock level
+ *
+ * @return:    String corresponding to the clk level
+ */
+static const char *cam_soc_util_get_string_from_level(
+	enum cam_vote_level level)
+{
+	switch (level) {
+	case CAM_SUSPEND_VOTE:
+		return "";
+	case CAM_MINSVS_VOTE:
+		return "MINSVS[1]";
+	case CAM_LOWSVS_VOTE:
+		return "LOWSVS[2]";
+	case CAM_SVS_VOTE:
+		return "SVS[3]";
+	case CAM_SVSL1_VOTE:
+		return "SVSL1[4]";
+	case CAM_NOMINAL_VOTE:
+		return "NOM[5]";
+	case CAM_NOMINALL1_VOTE:
+		return "NOML1[6]";
+	case CAM_TURBO_VOTE:
+		return "TURBO[7]";
+	default:
+		return "";
+	}
+}
+
+/**
+ * cam_soc_util_get_supported_clk_levels()
+ *
+ * @brief:      Returns the string of all the supported clk levels for
+ *              the given device
+ *
+ * @soc_info:   Device soc information
+ *
+ * @return:     String containing all supported clk levels
+ */
+static const char *cam_soc_util_get_supported_clk_levels(
+	struct cam_hw_soc_info *soc_info)
+{
+	int i = 0;
+
+	memset(supported_clk_info, 0, sizeof(supported_clk_info));
+	strlcat(supported_clk_info, "Supported levels: ",
+		sizeof(supported_clk_info));
+
+	for (i = 0; i < CAM_MAX_VOTE; i++) {
+		if (soc_info->clk_level_valid[i] == true) {
+			strlcat(supported_clk_info,
+				cam_soc_util_get_string_from_level(i),
+				sizeof(supported_clk_info));
+			strlcat(supported_clk_info, " ",
+				sizeof(supported_clk_info));
+		}
+	}
+
+	strlcat(supported_clk_info, "\n", sizeof(supported_clk_info));
+	return supported_clk_info;
+}
+
+static int cam_soc_util_clk_lvl_options_open(struct inode *inode,
+	struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t cam_soc_util_clk_lvl_options_read(struct file *file,
+	char __user *clk_info, size_t size, loff_t *ppos)
+{
+	struct cam_hw_soc_info *soc_info =
+		(struct cam_hw_soc_info *)file->private_data;
+	const char *display_string =
+		cam_soc_util_get_supported_clk_levels(soc_info);
+
+	return simple_read_from_buffer(clk_info, size, ppos, display_string,
+		strlen(display_string));
+}
+
+static const struct file_operations cam_soc_util_clk_lvl_options = {
+	.open = cam_soc_util_clk_lvl_options_open,
+	.read = cam_soc_util_clk_lvl_options_read,
+};
+
+static int cam_soc_util_set_clk_lvl(void *data, u64 val)
+{
+	struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
+
+	if (val <= CAM_SUSPEND_VOTE || val >= CAM_MAX_VOTE)
+		return 0;
+
+	if (soc_info->clk_level_valid[val] == true)
+		soc_info->clk_level_override = val;
+	else
+		soc_info->clk_level_override = 0;
+
+	return 0;
+}
+
+static int cam_soc_util_get_clk_lvl(void *data, u64 *val)
+{
+	struct cam_hw_soc_info *soc_info = (struct cam_hw_soc_info *)data;
+
+	*val = soc_info->clk_level_override;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(cam_soc_util_clk_lvl_control,
+	cam_soc_util_get_clk_lvl, cam_soc_util_set_clk_lvl, "%08llu");
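+
+/*
+ * The two attributes above surface as debugfs files under a per-device
+ * directory named "clk_dir_<dev_name>". An illustrative shell session,
+ * assuming debugfs is mounted at /sys/kernel/debug:
+ *
+ *	cat /sys/kernel/debug/clk_dir_<dev_name>/clk_lvl_options
+ *	echo 5 > /sys/kernel/debug/clk_dir_<dev_name>/clk_lvl_control
+ *
+ * Writing a level that is valid for the device (e.g. 5 for NOM) sets
+ * the override used by cam_soc_util_set_src_clk_rate(); writing an
+ * unsupported level clears it.
+ */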
+
+/**
+ * cam_soc_util_create_clk_lvl_debugfs()
+ *
+ * @brief:      Creates debugfs files to view/control device clk rates
+ *
+ * @soc_info:   Device soc information
+ *
+ * @return:     Success or failure
+ */
+static int cam_soc_util_create_clk_lvl_debugfs(
+	struct cam_hw_soc_info *soc_info)
+{
+	struct dentry *dentry = NULL;
+
+	if (!soc_info) {
+		CAM_ERR(CAM_UTIL, "soc info is NULL");
+		return -EINVAL;
+	}
+
+	if (soc_info->dentry)
+		return 0;
+
+	memset(debugfs_dir_name, 0, sizeof(debugfs_dir_name));
+	strlcat(debugfs_dir_name, "clk_dir_", sizeof(debugfs_dir_name));
+	strlcat(debugfs_dir_name, soc_info->dev_name, sizeof(debugfs_dir_name));
+
+	dentry = debugfs_create_dir(debugfs_dir_name, NULL);
+	if (!dentry) {
+		CAM_ERR(CAM_UTIL, "failed to create debug directory");
+		return -ENOMEM;
+	}
+	soc_info->dentry = dentry;
+
+	if (!debugfs_create_file("clk_lvl_options", 0444,
+		dentry, soc_info, &cam_soc_util_clk_lvl_options)) {
+		CAM_ERR(CAM_UTIL, "failed to create clk_lvl_options");
+		goto err;
+	}
+
+	if (!debugfs_create_file("clk_lvl_control", 0644,
+		dentry, soc_info, &cam_soc_util_clk_lvl_control)) {
+		CAM_ERR(CAM_UTIL, "failed to create clk_lvl_control");
+		goto err;
+	}
+
+	CAM_DBG(CAM_UTIL, "clk lvl debugfs for %s successfully created",
+		soc_info->dev_name);
+
+	return 0;
+
+err:
+	debugfs_remove_recursive(dentry);
+	soc_info->dentry = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * cam_soc_util_remove_clk_lvl_debugfs()
+ *
+ * @brief:      Removes the debugfs files used to view/control
+ *              device clk rates
+ *
+ * @soc_info:   Device soc information
+ *
+ */
+static void cam_soc_util_remove_clk_lvl_debugfs(
+	struct cam_hw_soc_info *soc_info)
+{
+	debugfs_remove_recursive(soc_info->dentry);
+	soc_info->dentry = NULL;
+}
+
+int cam_soc_util_get_level_from_string(const char *string,
+	enum cam_vote_level *level)
+{
+	if (!level)
+		return -EINVAL;
+
+	if (!strcmp(string, "suspend")) {
+		*level = CAM_SUSPEND_VOTE;
+	} else if (!strcmp(string, "minsvs")) {
+		*level = CAM_MINSVS_VOTE;
+	} else if (!strcmp(string, "lowsvs")) {
+		*level = CAM_LOWSVS_VOTE;
+	} else if (!strcmp(string, "svs")) {
+		*level = CAM_SVS_VOTE;
+	} else if (!strcmp(string, "svs_l1")) {
+		*level = CAM_SVSL1_VOTE;
+	} else if (!strcmp(string, "nominal")) {
+		*level = CAM_NOMINAL_VOTE;
+	} else if (!strcmp(string, "nominal_l1")) {
+		*level = CAM_NOMINALL1_VOTE;
+	} else if (!strcmp(string, "turbo")) {
+		*level = CAM_TURBO_VOTE;
+	} else {
+		CAM_ERR(CAM_UTIL, "Invalid string %s", string);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_get_clk_level_to_apply()
+ *
+ * @brief:              Get the clock level to apply. If the requested level
+ *                      is not valid, bump the level to next available valid
+ *                      level. If no higher level found, return failure.
+ *
+ * @soc_info:           Device soc information
+ * @req_level:          Requested level
+ * @apply_level:        Level to apply
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_get_clk_level_to_apply(
+	struct cam_hw_soc_info *soc_info, enum cam_vote_level req_level,
+	enum cam_vote_level *apply_level)
+{
+	if (req_level >= CAM_MAX_VOTE) {
+		CAM_ERR(CAM_UTIL, "Invalid clock level parameter %d",
+			req_level);
+		return -EINVAL;
+	}
+
+	if (soc_info->clk_level_valid[req_level] == true) {
+		*apply_level = req_level;
+	} else {
+		int i;
+
+		for (i = (req_level + 1); i < CAM_MAX_VOTE; i++)
+			if (soc_info->clk_level_valid[i] == true) {
+				*apply_level = i;
+				break;
+			}
+
+		if (i == CAM_MAX_VOTE) {
+			CAM_ERR(CAM_UTIL,
+				"No valid clock level found to apply, req=%d",
+				req_level);
+			return -EINVAL;
+		}
+	}
+
+	CAM_DBG(CAM_UTIL, "Req level %d, Applying %d",
+		req_level, *apply_level);
+
+	return 0;
+}
+
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
+		return -ENODEV;
+	}
+
+	enable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info)
+{
+	if (!soc_info) {
+		CAM_ERR(CAM_UTIL, "Invalid arguments");
+		return -EINVAL;
+	}
+
+	if (!soc_info->irq_line) {
+		CAM_ERR(CAM_UTIL, "No IRQ line available");
+		return -ENODEV;
+	}
+
+	disable_irq(soc_info->irq_line->start);
+
+	return 0;
+}
+
+long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_index, unsigned long clk_rate)
+{
+	if (!soc_info || (clk_index >= soc_info->num_clk) || (clk_rate == 0)) {
+		CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d %lu",
+			soc_info, clk_index, clk_rate);
+		return clk_rate;
+	}
+
+	return clk_round_rate(soc_info->clk[clk_index], clk_rate);
+}
+
+int cam_soc_util_set_clk_flags(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_index, unsigned long flags)
+{
+	if (!soc_info || (clk_index >= soc_info->num_clk)) {
+		CAM_ERR(CAM_UTIL, "Invalid input params %pK, %d",
+			soc_info, clk_index);
+		return -EINVAL;
+	}
+
+	return clk_set_flags(soc_info->clk[clk_index], flags);
+}
+
+/**
+ * cam_soc_util_set_clk_rate()
+ *
+ * @brief:          Sets the given rate for the clk requested for
+ *
+ * @clk:            Clock structure information for which rate is to be set
+ * @clk_name:       Name of the clock for which rate is being set
+ * @clk_rate:       Clock rate to be set
+ *
+ * @return:         Success or failure
+ */
+static int cam_soc_util_set_clk_rate(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+	long clk_rate_round;
+
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "set %s, rate %d", clk_name, clk_rate);
+	if (clk_rate > 0) {
+		clk_rate_round = clk_round_rate(clk, clk_rate);
+		CAM_DBG(CAM_UTIL, "new_rate %ld", clk_rate_round);
+		if (clk_rate_round < 0) {
+			CAM_ERR(CAM_UTIL, "round failed for clock %s rc = %ld",
+				clk_name, clk_rate_round);
+			return clk_rate_round;
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
+			return rc;
+		}
+	} else if (clk_rate == INIT_RATE) {
+		clk_rate_round = clk_get_rate(clk);
+		CAM_DBG(CAM_UTIL, "init new_rate %ld", clk_rate_round);
+		if (clk_rate_round == 0) {
+			clk_rate_round = clk_round_rate(clk, 0);
+			if (clk_rate_round <= 0) {
+				CAM_ERR(CAM_UTIL, "round rate failed on %s",
+					clk_name);
+				return clk_rate_round;
+			}
+		}
+		rc = clk_set_rate(clk, clk_rate_round);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "set_rate failed on %s", clk_name);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
+	int32_t clk_rate)
+{
+	int32_t src_clk_idx;
+	struct clk *clk = NULL;
+
+	if (!soc_info || (soc_info->src_clk_idx < 0))
+		return -EINVAL;
+
+	src_clk_idx = soc_info->src_clk_idx;
+	clk = soc_info->clk[src_clk_idx];
+
+	/*
+	 * clk_level_override holds a vote level, not a rate; translate
+	 * it to the src clock rate configured at that level.
+	 */
+	if (soc_info->clk_level_override && clk_rate)
+		clk_rate =
+			soc_info->clk_rate[soc_info->clk_level_override][src_clk_idx];
+
+	return cam_soc_util_set_clk_rate(clk,
+		soc_info->clk_name[src_clk_idx], clk_rate);
+}
+
+int cam_soc_util_clk_put(struct clk **clk)
+{
+	if (!(*clk)) {
+		CAM_ERR(CAM_UTIL, "Invalid params clk");
+		return -EINVAL;
+	}
+
+	clk_put(*clk);
+	*clk = NULL;
+
+	return 0;
+}
+
+static struct clk *cam_soc_util_option_clk_get(struct device_node *np,
+	int index)
+{
+	struct of_phandle_args clkspec;
+	struct clk *clk;
+	int rc;
+
+	if (index < 0)
+		return ERR_PTR(-EINVAL);
+
+	rc = of_parse_phandle_with_args(np, "clocks-option", "#clock-cells",
+		index, &clkspec);
+	if (rc)
+		return ERR_PTR(rc);
+
+	clk = of_clk_get_from_provider(&clkspec);
+	of_node_put(clkspec.np);
+
+	return clk;
+}
+
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+	const char *clk_name, struct clk **clk, int32_t *clk_index,
+	int32_t *clk_rate)
+{
+	int index = 0;
+	int rc = 0;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info || !clk_name || !clk) {
+		CAM_ERR(CAM_UTIL,
+			"Invalid params soc_info %pK clk_name %s clk %pK",
+			soc_info, clk_name, clk);
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	index = of_property_match_string(of_node, "clock-names-option",
+		clk_name);
+
+	*clk = cam_soc_util_option_clk_get(of_node, index);
+	if (IS_ERR(*clk)) {
+		CAM_ERR(CAM_UTIL, "No clk named %s found. Dev %s", clk_name,
+			soc_info->dev_name);
+		*clk_index = -1;
+		return -EFAULT;
+	}
+	*clk_index = index;
+
+	rc = of_property_read_u32_index(of_node, "clock-rates-option",
+		index, clk_rate);
+	if (rc) {
+		CAM_ERR(CAM_UTIL,
+			"Error reading clock-rates clk_name %s index %d",
+			clk_name, index);
+		cam_soc_util_clk_put(clk);
+		*clk_rate = 0;
+		return rc;
+	}
+
+	/*
+	 * Option clocks are assumed to be available to a single device here.
+	 * Hence use INIT_RATE instead of NO_SET_RATE.
+	 */
+	*clk_rate = (*clk_rate == 0) ? (int32_t)INIT_RATE : *clk_rate;
+
+	CAM_DBG(CAM_UTIL, "clk_name %s index %d clk_rate %d",
+		clk_name, *clk_index, *clk_rate);
+
+	return 0;
+}
+
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate)
+{
+	int rc = 0;
+
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	rc = cam_soc_util_set_clk_rate(clk, clk_name, clk_rate);
+	if (rc)
+		return rc;
+
+	rc = clk_prepare_enable(clk);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "enable failed for %s: rc(%d)", clk_name, rc);
+		return rc;
+	}
+
+	return rc;
+}
+
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name)
+{
+	if (!clk || !clk_name)
+		return -EINVAL;
+
+	CAM_DBG(CAM_UTIL, "disable %s", clk_name);
+	clk_disable_unprepare(clk);
+
+	return 0;
+}
+
+/**
+ * cam_soc_util_clk_enable_default()
+ *
+ * @brief:              This function enables the default clocks present
+ *                      in soc_info
+ *
+ * @soc_info:           Device soc information
+ * @clk_level:          Clk level to apply while enabling
+ *
+ * @return:             success or failure
+ */
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level)
+{
+	int i, rc = 0;
+	enum cam_vote_level apply_level;
+
+	if ((soc_info->num_clk == 0) ||
+		(soc_info->num_clk > CAM_SOC_MAX_CLK)) {
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+		&apply_level);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		rc = cam_soc_util_clk_enable(soc_info->clk[i],
+			soc_info->clk_name[i],
+			soc_info->clk_rate[apply_level][i]);
+		if (rc)
+			goto clk_disable;
+	}
+
+	return rc;
+
+clk_disable:
+	for (i--; i >= 0; i--) {
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+	}
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_clk_disable_default()
+ *
+ * @brief:              This function disables the default clocks present
+ *                      in soc_info
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             success or failure
+ */
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+
+	if (soc_info->num_clk == 0)
+		return;
+
+	for (i = soc_info->num_clk - 1; i >= 0; i--)
+		cam_soc_util_clk_disable(soc_info->clk[i],
+			soc_info->clk_name[i]);
+}
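+
+/*
+ * Illustrative pairing sketch: a driver's start path enables all the
+ * default clocks at (at least) the SVS level, and the stop path tears
+ * them down again in reverse order.
+ *
+ *	rc = cam_soc_util_clk_enable_default(soc_info, CAM_SVS_VOTE);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	cam_soc_util_clk_disable_default(soc_info);
+ */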
+
+/**
+ * cam_soc_util_get_dt_clk_info()
+ *
+ * @brief:              Parse the DT and populate the clock properties,
+ *                      including the src clock that has rate control
+ *
+ * @soc_info:           Device soc struct to be populated
+ *
+ * @return:             success or failure
+ */
+static int cam_soc_util_get_dt_clk_info(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count;
+	int num_clk_rates, num_clk_levels;
+	int i, j, rc;
+	int32_t num_clk_level_strings;
+	const char *src_clk_str = NULL;
+	const char *clk_control_debugfs = NULL;
+	const char *clk_cntl_lvl_string = NULL;
+	enum cam_vote_level level;
+
+	if (!soc_info || !soc_info->dev)
+		return -EINVAL;
+
+	of_node = soc_info->dev->of_node;
+
+	if (!of_property_read_bool(of_node, "use-shared-clk")) {
+		CAM_DBG(CAM_UTIL, "No shared clk parameter defined");
+		soc_info->use_shared_clk = false;
+	} else {
+		soc_info->use_shared_clk = true;
+	}
+
+	count = of_property_count_strings(of_node, "clock-names");
+
+	CAM_DBG(CAM_UTIL, "count = %d", count);
+	if (count > CAM_SOC_MAX_CLK) {
+		CAM_ERR(CAM_UTIL, "invalid count of clocks, count=%d", count);
+		rc = -EINVAL;
+		return rc;
+	}
+	if (count <= 0) {
+		CAM_DBG(CAM_UTIL, "No clock-names found");
+		count = 0;
+		soc_info->num_clk = count;
+		return 0;
+	}
+	soc_info->num_clk = count;
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node, "clock-names",
+				i, &(soc_info->clk_name[i]));
+		CAM_DBG(CAM_UTIL, "clock-names[%d] = %s",
+			i, soc_info->clk_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_UTIL,
+				"i= %d count= %d reading clock-names failed",
+				i, count);
+			return rc;
+		}
+	}
+
+	num_clk_rates = of_property_count_u32_elems(of_node, "clock-rates");
+	if (num_clk_rates <= 0) {
+		CAM_ERR(CAM_UTIL, "reading clock-rates count failed");
+		return -EINVAL;
+	}
+
+	if ((num_clk_rates % soc_info->num_clk) != 0) {
+		CAM_ERR(CAM_UTIL,
+			"mismatch clk/rates, No of clocks=%d, No of rates=%d",
+			soc_info->num_clk, num_clk_rates);
+		return -EINVAL;
+	}
+
+	num_clk_levels = (num_clk_rates / soc_info->num_clk);
+
+	num_clk_level_strings = of_property_count_strings(of_node,
+		"clock-cntl-level");
+	if (num_clk_level_strings != num_clk_levels) {
+		CAM_ERR(CAM_UTIL,
+			"Mismatch No of levels=%d, No of level string=%d",
+			num_clk_levels, num_clk_level_strings);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_clk_levels; i++) {
+		rc = of_property_read_string_index(of_node,
+			"clock-cntl-level", i, &clk_cntl_lvl_string);
+		if (rc) {
+			CAM_ERR(CAM_UTIL,
+				"Error reading clock-cntl-level, rc=%d", rc);
+			return rc;
+		}
+
+		rc = cam_soc_util_get_level_from_string(clk_cntl_lvl_string,
+			&level);
+		if (rc)
+			return rc;
+
+		CAM_DBG(CAM_UTIL,
+			"[%d] : %s %d", i, clk_cntl_lvl_string, level);
+		soc_info->clk_level_valid[level] = true;
+		for (j = 0; j < soc_info->num_clk; j++) {
+			rc = of_property_read_u32_index(of_node, "clock-rates",
+				((i * soc_info->num_clk) + j),
+				&soc_info->clk_rate[level][j]);
+			if (rc) {
+				CAM_ERR(CAM_UTIL,
+					"Error reading clock-rates, rc=%d",
+					rc);
+				return rc;
+			}
+
+			soc_info->clk_rate[level][j] =
+				(soc_info->clk_rate[level][j] == 0) ?
+				(int32_t)NO_SET_RATE :
+				soc_info->clk_rate[level][j];
+
+			CAM_DBG(CAM_UTIL, "soc_info->clk_rate[%d][%d] = %d",
+				level, j,
+				soc_info->clk_rate[level][j]);
+		}
+	}
+
+	soc_info->src_clk_idx = -1;
+	rc = of_property_read_string_index(of_node, "src-clock-name", 0,
+		&src_clk_str);
+	if (rc || !src_clk_str) {
+		CAM_DBG(CAM_UTIL, "No src_clk_str found");
+		rc = 0;
+		goto end;
+	}
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		if (strcmp(soc_info->clk_name[i], src_clk_str) == 0) {
+			soc_info->src_clk_idx = i;
+			CAM_DBG(CAM_UTIL, "src clock = %s, index = %d",
+				src_clk_str, i);
+			break;
+		}
+	}
+
+	rc = of_property_read_string_index(of_node,
+		"clock-control-debugfs", 0, &clk_control_debugfs);
+	if (rc || !clk_control_debugfs) {
+		CAM_DBG(CAM_UTIL, "No clock_control_debugfs property found");
+		rc = 0;
+		goto end;
+	}
+
+	if (strcmp("true", clk_control_debugfs) == 0)
+		soc_info->clk_control_enable = true;
+
+end:
+	return rc;
+}
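+
+/*
+ * Illustrative DT fragment consumed by cam_soc_util_get_dt_clk_info()
+ * (clock names and rates are hypothetical). "clock-rates" is a flat
+ * u32 array of num_levels rows, one rate per clock per row; a rate of
+ * 0 means "do not set a rate" for that clock at that level.
+ *
+ *	clock-names = "hyp_ahb_clk", "hyp_core_clk";
+ *	src-clock-name = "hyp_core_clk";
+ *	clock-cntl-level = "svs", "turbo";
+ *	clock-rates =
+ *		<0 400000000>,
+ *		<0 600000000>;
+ */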
+
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level)
+{
+	int i, rc = 0;
+	enum cam_vote_level apply_level;
+
+	if ((soc_info->num_clk == 0) ||
+		(soc_info->num_clk > CAM_SOC_MAX_CLK)) {
+		CAM_ERR(CAM_UTIL, "Invalid number of clock %d",
+			soc_info->num_clk);
+		return -EINVAL;
+	}
+
+	rc = cam_soc_util_get_clk_level_to_apply(soc_info, clk_level,
+		&apply_level);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < soc_info->num_clk; i++) {
+		rc = cam_soc_util_set_clk_rate(soc_info->clk[i],
+			soc_info->clk_name[i],
+			soc_info->clk_rate[apply_level][i]);
+		if (rc)
+			break;
+	}
+
+	return rc;
+}
+
+static int cam_soc_util_get_dt_gpio_req_tbl(struct device_node *of_node,
+	struct cam_soc_gpio_data *gconf, uint16_t *gpio_array,
+	uint16_t gpio_array_size)
+{
+	int32_t rc = 0, i = 0;
+	uint32_t count = 0;
+	uint32_t *val_array = NULL;
+
+	if (!of_get_property(of_node, "gpio-req-tbl-num", &count))
+		return 0;
+
+	count /= sizeof(uint32_t);
+	if (!count) {
+		CAM_ERR(CAM_UTIL, "gpio-req-tbl-num 0");
+		return 0;
+	}
+
+	val_array = kcalloc(count, sizeof(uint32_t), GFP_KERNEL);
+	if (!val_array)
+		return -ENOMEM;
+
+	gconf->cam_gpio_req_tbl = kcalloc(count, sizeof(struct gpio),
+		GFP_KERNEL);
+	if (!gconf->cam_gpio_req_tbl) {
+		rc = -ENOMEM;
+		goto free_val_array;
+	}
+	gconf->cam_gpio_req_tbl_size = count;
+
+	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-num",
+		val_array, count);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "failed in reading gpio-req-tbl-num, rc = %d",
+			rc);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (val_array[i] >= gpio_array_size) {
+			CAM_ERR(CAM_UTIL, "gpio req tbl index %d invalid",
+				val_array[i]);
+			rc = -EINVAL;
+			goto free_gpio_req_tbl;
+		}
+		gconf->cam_gpio_req_tbl[i].gpio = gpio_array[val_array[i]];
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].gpio = %d", i,
+			gconf->cam_gpio_req_tbl[i].gpio);
+	}
+
+	rc = of_property_read_u32_array(of_node, "gpio-req-tbl-flags",
+		val_array, count);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Failed in gpio-req-tbl-flags, rc %d", rc);
+		goto free_gpio_req_tbl;
+	}
+
+	for (i = 0; i < count; i++) {
+		gconf->cam_gpio_req_tbl[i].flags = val_array[i];
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].flags = %ld", i,
+			gconf->cam_gpio_req_tbl[i].flags);
+	}
+
+	for (i = 0; i < count; i++) {
+		rc = of_property_read_string_index(of_node,
+			"gpio-req-tbl-label", i,
+			&gconf->cam_gpio_req_tbl[i].label);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Failed rc %d", rc);
+			goto free_gpio_req_tbl;
+		}
+		CAM_DBG(CAM_UTIL, "cam_gpio_req_tbl[%d].label = %s", i,
+			gconf->cam_gpio_req_tbl[i].label);
+	}
+
+	kfree(val_array);
+
+	return rc;
+
+free_gpio_req_tbl:
+	kfree(gconf->cam_gpio_req_tbl);
+free_val_array:
+	kfree(val_array);
+	gconf->cam_gpio_req_tbl_size = 0;
+
+	return rc;
+}
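+
+/*
+ * Illustrative DT fragment for the gpio request table parsed above
+ * (indices, flags and labels are hypothetical). "gpio-req-tbl-num"
+ * holds indices into the node's "gpios" list, and each entry carries
+ * a matching flags value and label string.
+ *
+ *	gpios = <&tlmm 29 0>, <&tlmm 30 0>;
+ *	gpio-req-tbl-num = <0 1>;
+ *	gpio-req-tbl-flags = <1 1>;
+ *	gpio-req-tbl-label = "HYP_RESET", "HYP_STANDBY";
+ */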
+
+static int cam_soc_util_get_gpio_info(struct cam_hw_soc_info *soc_info)
+{
+	int32_t rc = 0, i = 0;
+	uint16_t *gpio_array = NULL;
+	int16_t gpio_array_size = 0;
+	struct cam_soc_gpio_data *gconf = NULL;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info || !soc_info->dev)
+		return -EINVAL;
+
+	of_node = soc_info->dev->of_node;
+
+	/* Validate input parameters */
+	if (!of_node) {
+		CAM_ERR(CAM_UTIL, "Invalid param of_node");
+		return -EINVAL;
+	}
+
+	gpio_array_size = of_gpio_count(of_node);
+
+	if (gpio_array_size <= 0)
+		return 0;
+
+	CAM_DBG(CAM_UTIL, "gpio count %d", gpio_array_size);
+
+	gpio_array = kcalloc(gpio_array_size, sizeof(uint16_t), GFP_KERNEL);
+	if (!gpio_array) {
+		rc = -ENOMEM;
+		goto free_gpio_conf;
+	}
+
+	for (i = 0; i < gpio_array_size; i++) {
+		gpio_array[i] = of_get_gpio(of_node, i);
+		CAM_DBG(CAM_UTIL, "gpio_array[%d] = %d", i, gpio_array[i]);
+	}
+
+	gconf = kzalloc(sizeof(*gconf), GFP_KERNEL);
+	if (!gconf) {
+		rc = -ENOMEM;
+		goto free_gpio_array;
+	}
+
+	rc = cam_soc_util_get_dt_gpio_req_tbl(of_node, gconf, gpio_array,
+		gpio_array_size);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "failed in msm_camera_get_dt_gpio_req_tbl");
+		goto free_gpio_array;
+	}
+
+	gconf->cam_gpio_common_tbl = kcalloc(gpio_array_size,
+				sizeof(struct gpio), GFP_KERNEL);
+	if (!gconf->cam_gpio_common_tbl) {
+		rc = -ENOMEM;
+		goto free_gpio_array;
+	}
+
+	for (i = 0; i < gpio_array_size; i++)
+		gconf->cam_gpio_common_tbl[i].gpio = gpio_array[i];
+
+	gconf->cam_gpio_common_tbl_size = gpio_array_size;
+	soc_info->gpio_data = gconf;
+	kfree(gpio_array);
+
+	return rc;
+
+free_gpio_array:
+	kfree(gpio_array);
+free_gpio_conf:
+	kfree(gconf);
+	soc_info->gpio_data = NULL;
+
+	return rc;
+}
+
+static int cam_soc_util_request_gpio_table(
+	struct cam_hw_soc_info *soc_info, bool gpio_en)
+{
+	int rc = 0, i = 0;
+	uint8_t size = 0;
+	struct cam_soc_gpio_data *gpio_conf =
+			soc_info->gpio_data;
+	struct gpio *gpio_tbl = NULL;
+
+	if (!gpio_conf) {
+		CAM_DBG(CAM_UTIL, "No GPIO entry");
+		return 0;
+	}
+	if (gpio_conf->cam_gpio_common_tbl_size <= 0) {
+		CAM_ERR(CAM_UTIL, "GPIO table size is invalid");
+		return -EINVAL;
+	}
+	size = gpio_conf->cam_gpio_req_tbl_size;
+	gpio_tbl = gpio_conf->cam_gpio_req_tbl;
+
+	if (!gpio_tbl || !size) {
+		CAM_ERR(CAM_UTIL, "Invalid gpio_tbl %pK / size %d",
+			gpio_tbl, size);
+		return -EINVAL;
+	}
+	for (i = 0; i < size; i++) {
+		CAM_DBG(CAM_UTIL, "i=%d, gpio=%d dir=%ld", i,
+			gpio_tbl[i].gpio, gpio_tbl[i].flags);
+	}
+	if (gpio_en) {
+		for (i = 0; i < size; i++) {
+			rc = gpio_request_one(gpio_tbl[i].gpio,
+				gpio_tbl[i].flags, gpio_tbl[i].label);
+			if (rc) {
+				/*
+				 * If a GPIO request fails, continue to
+				 * apply the remaining gpios and output an
+				 * error message to aid driver bringup debug.
+				 */
+				CAM_ERR(CAM_UTIL, "gpio %d:%s request fails",
+					gpio_tbl[i].gpio, gpio_tbl[i].label);
+			}
+		}
+	} else {
+		gpio_free_array(gpio_tbl, size);
+	}
+
+	return rc;
+}
+
+static int cam_soc_util_get_dt_regulator_info(
+	struct cam_hw_soc_info *soc_info)
+{
+	int rc = 0, count = 0, i = 0;
+	struct device_node *of_node = NULL;
+
+	if (!soc_info || !soc_info->dev) {
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
+		return -EINVAL;
+	}
+
+	of_node = soc_info->dev->of_node;
+
+	soc_info->num_rgltr = 0;
+	count = of_property_count_strings(of_node, "regulator-names");
+	if (count != -EINVAL) {
+		if (count <= 0) {
+			CAM_ERR(CAM_UTIL, "no regulators found");
+			return -EINVAL;
+		}
+
+		soc_info->num_rgltr = count;
+
+	} else {
+		CAM_DBG(CAM_UTIL, "No regulators node found");
+		return 0;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		rc = of_property_read_string_index(of_node,
+			"regulator-names", i, &soc_info->rgltr_name[i]);
+		CAM_DBG(CAM_UTIL, "rgltr_name[%d] = %s",
+			i, soc_info->rgltr_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "no regulator resource at cnt=%d", i);
+			return -ENODEV;
+		}
+	}
+
+	if (!of_property_read_bool(of_node, "rgltr-cntrl-support")) {
+		CAM_DBG(CAM_UTIL, "No regulator control parameter defined");
+		soc_info->rgltr_ctrl_support = false;
+		return 0;
+	}
+
+	soc_info->rgltr_ctrl_support = true;
+
+	rc = of_property_read_u32_array(of_node, "rgltr-min-voltage",
+		soc_info->rgltr_min_volt, soc_info->num_rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "No minimum volatage value found, rc=%d", rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "rgltr-max-voltage",
+		soc_info->rgltr_max_volt, soc_info->num_rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "No maximum volatage value found, rc=%d", rc);
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32_array(of_node, "rgltr-load-current",
+		soc_info->rgltr_op_mode, soc_info->num_rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "No Load curent found rc=%d", rc);
+		return -EINVAL;
+	}
+
+	return rc;
+}
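+
+/*
+ * Illustrative DT fragment for the regulator properties parsed above
+ * (names and values are hypothetical). The three value arrays are only
+ * read when "rgltr-cntrl-support" is present, and each must carry one
+ * entry per name in "regulator-names".
+ *
+ *	regulator-names = "camss-vdd", "camss-io";
+ *	rgltr-cntrl-support;
+ *	rgltr-min-voltage = <1200000 1800000>;
+ *	rgltr-max-voltage = <1200000 1800000>;
+ *	rgltr-load-current = <100000 100000>;
+ */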
+
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info)
+{
+	struct device_node *of_node = NULL;
+	int count = 0, i = 0, rc = 0;
+
+	if (!soc_info || !soc_info->dev)
+		return -EINVAL;
+
+	of_node = soc_info->dev->of_node;
+
+	rc = of_property_read_u32(of_node, "cell-index", &soc_info->index);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "device %s failed to read cell-index",
+			soc_info->dev_name);
+		return rc;
+	}
+
+	count = of_property_count_strings(of_node, "reg-names");
+	if (count <= 0) {
+		CAM_DBG(CAM_UTIL, "no reg-names found for: %s",
+			soc_info->dev_name);
+		count = 0;
+	}
+	soc_info->num_mem_block = count;
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		rc = of_property_read_string_index(of_node, "reg-names", i,
+			&soc_info->mem_block_name[i]);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "failed to read reg-names at %d", i);
+			return rc;
+		}
+		soc_info->mem_block[i] =
+			platform_get_resource_byname(soc_info->pdev,
+			IORESOURCE_MEM, soc_info->mem_block_name[i]);
+
+		if (!soc_info->mem_block[i]) {
+			CAM_ERR(CAM_UTIL, "no mem resource by name %s",
+				soc_info->mem_block_name[i]);
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	if (soc_info->num_mem_block > 0) {
+		rc = of_property_read_u32_array(of_node, "reg-cam-base",
+			soc_info->mem_block_cam_base, soc_info->num_mem_block);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "Error reading register offsets");
+			return rc;
+		}
+	}
+
+	rc = of_property_read_string_index(of_node, "interrupt-names", 0,
+		&soc_info->irq_name);
+	if (rc) {
+		CAM_DBG(CAM_UTIL, "No interrupt line preset for: %s",
+			soc_info->dev_name);
+		rc = 0;
+	} else {
+		soc_info->irq_line =
+			platform_get_resource_byname(soc_info->pdev,
+			IORESOURCE_IRQ, soc_info->irq_name);
+		if (!soc_info->irq_line) {
+			CAM_ERR(CAM_UTIL, "no irq resource");
+			rc = -ENODEV;
+			return rc;
+		}
+	}
+
+	rc = cam_soc_util_get_dt_regulator_info(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_get_dt_clk_info(soc_info);
+	if (rc)
+		return rc;
+
+	rc = cam_soc_util_get_gpio_info(soc_info);
+	if (rc)
+		return rc;
+
+	return rc;
+}
+
+/**
+ * cam_soc_util_get_regulator()
+ *
+ * @brief:              Get the regulator resource by name
+ *
+ * @dev:                Device associated with regulator
+ * @reg:                Return pointer to be filled with regulator on success
+ * @rgltr_name:         Name of regulator to get
+ *
+ * @return:             0 for Success, negative value for failure
+ */
+static int cam_soc_util_get_regulator(struct device *dev,
+	struct regulator **reg, const char *rgltr_name)
+{
+	int rc = 0;
+	*reg = regulator_get(dev, rgltr_name);
+	if (IS_ERR_OR_NULL(*reg)) {
+		rc = PTR_ERR(*reg);
+		rc = rc ? rc : -EINVAL;
+		CAM_ERR(CAM_UTIL, "Regulator %s get failed %d", rgltr_name, rc);
+		*reg = NULL;
+	}
+	return rc;
+}
+
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+	const char *rgltr_name, uint32_t rgltr_min_volt,
+	uint32_t rgltr_max_volt, uint32_t rgltr_op_mode,
+	uint32_t rgltr_delay_ms)
+{
+	int32_t rc = 0;
+
+	if (!rgltr) {
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
+		return -EINVAL;
+	}
+
+	rc = regulator_disable(rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "%s regulator disable failed", rgltr_name);
+		return rc;
+	}
+
+	if (rgltr_delay_ms > 20)
+		msleep(rgltr_delay_ms);
+	else if (rgltr_delay_ms)
+		usleep_range(rgltr_delay_ms * 1000,
+			(rgltr_delay_ms * 1000) + 1000);
+
+	if (regulator_count_voltages(rgltr) > 0) {
+		regulator_set_load(rgltr, 0);
+		regulator_set_voltage(rgltr, 0, rgltr_max_volt);
+	}
+
+	return rc;
+}
+
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+	const char *rgltr_name,
+	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+	uint32_t rgltr_op_mode, uint32_t rgltr_delay)
+{
+	int32_t rc = 0;
+
+	if (!rgltr) {
+		CAM_ERR(CAM_UTIL, "Invalid NULL parameter");
+		return -EINVAL;
+	}
+
+	if (regulator_count_voltages(rgltr) > 0) {
+		CAM_DBG(CAM_UTIL, "voltage min=%d, max=%d",
+			rgltr_min_volt, rgltr_max_volt);
+
+		rc = regulator_set_voltage(
+			rgltr, rgltr_min_volt, rgltr_max_volt);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "%s set voltage failed", rgltr_name);
+			return rc;
+		}
+
+		rc = regulator_set_load(rgltr, rgltr_op_mode);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "%s set optimum mode failed",
+				rgltr_name);
+			return rc;
+		}
+	}
+
+	rc = regulator_enable(rgltr);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "%s regulator_enable failed", rgltr_name);
+		return rc;
+	}
+
+	if (rgltr_delay > 20)
+		msleep(rgltr_delay);
+	else if (rgltr_delay)
+		usleep_range(rgltr_delay * 1000,
+			(rgltr_delay * 1000) + 1000);
+
+	return rc;
+}
+
+static int cam_soc_util_request_pinctrl(
+	struct cam_hw_soc_info *soc_info)
+{
+	struct cam_soc_pinctrl_info *device_pctrl = &soc_info->pinctrl_info;
+	struct device *dev = soc_info->dev;
+
+	device_pctrl->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR_OR_NULL(device_pctrl->pinctrl)) {
+		CAM_DBG(CAM_UTIL, "Pinctrl not available");
+		device_pctrl->pinctrl = NULL;
+		return 0;
+	}
+	device_pctrl->gpio_state_active =
+		pinctrl_lookup_state(device_pctrl->pinctrl,
+				CAM_SOC_PINCTRL_STATE_DEFAULT);
+	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_active)) {
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the active state pinctrl handle");
+		device_pctrl->gpio_state_active = NULL;
+		return -EINVAL;
+	}
+	device_pctrl->gpio_state_suspend =
+		pinctrl_lookup_state(device_pctrl->pinctrl,
+			CAM_SOC_PINCTRL_STATE_SLEEP);
+	if (IS_ERR_OR_NULL(device_pctrl->gpio_state_suspend)) {
+		CAM_ERR(CAM_UTIL,
+			"Failed to get the suspend state pinctrl handle");
+		device_pctrl->gpio_state_suspend = NULL;
+		return -EINVAL;
+	}
+	return 0;
+}
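+
+/*
+ * Illustrative DT fragment for the pinctrl lookup above: the driver
+ * expects the two named states defined by
+ * CAM_SOC_PINCTRL_STATE_DEFAULT/_SLEEP (phandles are hypothetical).
+ *
+ *	pinctrl-names = "cam_default", "cam_suspend";
+ *	pinctrl-0 = <&hyp_cam_active>;
+ *	pinctrl-1 = <&hyp_cam_suspend>;
+ */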
+
+static void cam_soc_util_regulator_disable_default(
+	struct cam_hw_soc_info *soc_info)
+{
+	int j = 0;
+	uint32_t num_rgltr = soc_info->num_rgltr;
+
+	for (j = num_rgltr-1; j >= 0; j--) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			cam_soc_util_regulator_disable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				regulator_disable(soc_info->rgltr[j]);
+		}
+	}
+}
+
+static int cam_soc_util_regulator_enable_default(
+	struct cam_hw_soc_info *soc_info)
+{
+	int j = 0, rc = 0;
+	uint32_t num_rgltr = soc_info->num_rgltr;
+
+	for (j = 0; j < num_rgltr; j++) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			rc = cam_soc_util_regulator_enable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				rc = regulator_enable(soc_info->rgltr[j]);
+		}
+
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "%s enable failed",
+				soc_info->rgltr_name[j]);
+			goto disable_rgltr;
+		}
+	}
+
+	return rc;
+
+disable_rgltr:
+	for (j--; j >= 0; j--) {
+		if (soc_info->rgltr_ctrl_support == true) {
+			cam_soc_util_regulator_disable(soc_info->rgltr[j],
+				soc_info->rgltr_name[j],
+				soc_info->rgltr_min_volt[j],
+				soc_info->rgltr_max_volt[j],
+				soc_info->rgltr_op_mode[j],
+				soc_info->rgltr_delay[j]);
+		} else {
+			if (soc_info->rgltr[j])
+				regulator_disable(soc_info->rgltr[j]);
+		}
+	}
+
+	return rc;
+}
+
+int cam_soc_util_request_platform_resource(
+	struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data)
+{
+	int i = 0, rc = 0;
+
+	if (!soc_info || !soc_info->dev) {
+		CAM_ERR(CAM_UTIL, "Invalid parameters");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < soc_info->num_mem_block; i++) {
+		if (soc_info->reserve_mem) {
+			if (!request_mem_region(soc_info->mem_block[i]->start,
+				resource_size(soc_info->mem_block[i]),
+				soc_info->mem_block_name[i])) {
+				CAM_ERR(CAM_UTIL,
+					"Error Mem region request Failed:%s",
+					soc_info->mem_block_name[i]);
+				rc = -ENOMEM;
+				goto unmap_base;
+			}
+		}
+		soc_info->reg_map[i].mem_base = ioremap(
+			soc_info->mem_block[i]->start,
+			resource_size(soc_info->mem_block[i]));
+		if (!soc_info->reg_map[i].mem_base) {
+			CAM_ERR(CAM_UTIL, "i= %d base NULL", i);
+			rc = -ENOMEM;
+			goto unmap_base;
+		}
+		soc_info->reg_map[i].mem_cam_base =
+			soc_info->mem_block_cam_base[i];
+		soc_info->reg_map[i].size =
+			resource_size(soc_info->mem_block[i]);
+		soc_info->num_reg_map++;
+	}
+
+	for (i = 0; i < soc_info->num_rgltr; i++) {
+		if (soc_info->rgltr_name[i] == NULL) {
+			CAM_ERR(CAM_UTIL, "can't find regulator name");
+			rc = -EINVAL;
+			goto put_regulator;
+		}
+
+		rc = cam_soc_util_get_regulator(soc_info->dev,
+			&soc_info->rgltr[i],
+			soc_info->rgltr_name[i]);
+		if (rc)
+			goto put_regulator;
+	}
+
+	if (soc_info->irq_line) {
+		rc = devm_request_irq(soc_info->dev, soc_info->irq_line->start,
+			handler, IRQF_TRIGGER_RISING,
+			soc_info->irq_name, irq_data);
+		if (rc) {
+			CAM_ERR(CAM_UTIL, "irq request fail");
+			rc = -EBUSY;
+			goto put_regulator;
+		}
+		disable_irq(soc_info->irq_line->start);
+		soc_info->irq_data = irq_data;
+	}
+
+	/* Get Clock */
+	for (i = 0; i < soc_info->num_clk; i++) {
+		soc_info->clk[i] = clk_get(soc_info->dev,
+			soc_info->clk_name[i]);
+		if (IS_ERR_OR_NULL(soc_info->clk[i])) {
+			CAM_ERR(CAM_UTIL, "get failed for %s",
+				soc_info->clk_name[i]);
+			rc = -ENOENT;
+			soc_info->clk[i] = NULL;
+			goto put_clk;
+		}
+	}
+
+	rc = cam_soc_util_request_pinctrl(soc_info);
+	if (rc)
+		CAM_DBG(CAM_UTIL, "Failed in request pinctrl, rc=%d", rc);
+
+	rc = cam_soc_util_request_gpio_table(soc_info, true);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Failed in request gpio table, rc=%d", rc);
+		goto put_clk;
+	}
+
+	if (soc_info->clk_control_enable)
+		cam_soc_util_create_clk_lvl_debugfs(soc_info);
+
+	return rc;
+
+put_clk:
+	if (i == -1)
+		i = soc_info->num_clk;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->clk[i]) {
+			clk_put(soc_info->clk[i]);
+			soc_info->clk[i] = NULL;
+		}
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		devm_free_irq(soc_info->dev,
+			soc_info->irq_line->start, irq_data);
+	}
+
+put_regulator:
+	if (i == -1)
+		i = soc_info->num_rgltr;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_disable(soc_info->rgltr[i]);
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+unmap_base:
+	if (i == -1)
+		i = soc_info->num_reg_map;
+	for (i = i - 1; i >= 0; i--) {
+		if (soc_info->reserve_mem)
+			release_mem_region(soc_info->mem_block[i]->start,
+				resource_size(soc_info->mem_block[i]));
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	return rc;
+}
+
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info)
+{
+	int i;
+
+	if (!soc_info || !soc_info->dev) {
+		CAM_ERR(CAM_UTIL, "Invalid parameter");
+		return -EINVAL;
+	}
+
+	for (i = soc_info->num_clk - 1; i >= 0; i--) {
+		clk_put(soc_info->clk[i]);
+		soc_info->clk[i] = NULL;
+	}
+
+	for (i = soc_info->num_rgltr - 1; i >= 0; i--) {
+		if (soc_info->rgltr[i]) {
+			regulator_put(soc_info->rgltr[i]);
+			soc_info->rgltr[i] = NULL;
+		}
+	}
+
+	for (i = soc_info->num_reg_map - 1; i >= 0; i--) {
+		iounmap(soc_info->reg_map[i].mem_base);
+		soc_info->reg_map[i].mem_base = NULL;
+		soc_info->reg_map[i].size = 0;
+	}
+
+	if (soc_info->irq_line) {
+		disable_irq(soc_info->irq_line->start);
+		devm_free_irq(soc_info->dev,
+			soc_info->irq_line->start, soc_info->irq_data);
+	}
+
+	if (soc_info->pinctrl_info.pinctrl)
+		devm_pinctrl_put(soc_info->pinctrl_info.pinctrl);
+
+	/* release for gpio */
+	cam_soc_util_request_gpio_table(soc_info, false);
+
+	if (soc_info->clk_control_enable)
+		cam_soc_util_remove_clk_lvl_debugfs(soc_info);
+
+	return 0;
+}
+
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq)
+{
+	int rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	rc = cam_soc_util_regulator_enable_default(soc_info);
+	if (rc) {
+		CAM_ERR(CAM_UTIL, "Regulators enable failed");
+		return rc;
+	}
+
+	if (enable_clocks) {
+		rc = cam_soc_util_clk_enable_default(soc_info, clk_level);
+		if (rc)
+			goto disable_regulator;
+	}
+
+	if (enable_irq) {
+		rc  = cam_soc_util_irq_enable(soc_info);
+		if (rc)
+			goto disable_clk;
+	}
+
+	if (soc_info->pinctrl_info.pinctrl &&
+		soc_info->pinctrl_info.gpio_state_active) {
+		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+			soc_info->pinctrl_info.gpio_state_active);
+
+		if (rc)
+			goto disable_irq;
+	}
+
+	return rc;
+
+disable_irq:
+	if (enable_irq)
+		cam_soc_util_irq_disable(soc_info);
+
+disable_clk:
+	if (enable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+disable_regulator:
+	cam_soc_util_regulator_disable_default(soc_info);
+
+	return rc;
+}
+
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq)
+{
+	int rc = 0;
+
+	if (!soc_info)
+		return -EINVAL;
+
+	if (disable_irq)
+		rc |= cam_soc_util_irq_disable(soc_info);
+
+	if (disable_clocks)
+		cam_soc_util_clk_disable_default(soc_info);
+
+	cam_soc_util_regulator_disable_default(soc_info);
+
+	if (soc_info->pinctrl_info.pinctrl &&
+		soc_info->pinctrl_info.gpio_state_suspend)
+		rc = pinctrl_select_state(soc_info->pinctrl_info.pinctrl,
+			soc_info->pinctrl_info.gpio_state_suspend);
+
+	return rc;
+}
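+
+/*
+ * Illustrative lifecycle sketch tying the resource helpers together in
+ * a driver's probe/start/stop/remove paths (error handling elided;
+ * hyp_irq_handler and priv are hypothetical):
+ *
+ *	rc = cam_soc_util_request_platform_resource(soc_info,
+ *		hyp_irq_handler, priv);
+ *	rc = cam_soc_util_enable_platform_resource(soc_info, true,
+ *		CAM_SVS_VOTE, true);
+ *	...
+ *	cam_soc_util_disable_platform_resource(soc_info, true, true);
+ *	cam_soc_util_release_platform_resource(soc_info);
+ */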
+
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size)
+{
+	void __iomem     *base_addr = NULL;
+
+	CAM_DBG(CAM_UTIL, "base_idx %u size=%d", base_index, size);
+
+	if (!soc_info || base_index >= soc_info->num_reg_map ||
+		size <= 0 || (offset + size) >=
+		CAM_SOC_GET_REG_MAP_SIZE(soc_info, base_index))
+		return -EINVAL;
+
+	base_addr = CAM_SOC_GET_REG_MAP_START(soc_info, base_index);
+
+	/*
+	 * All error checking already done above,
+	 * hence ignoring the return value below.
+	 */
+	cam_io_dump(base_addr, offset, size);
+
+	return 0;
+}
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
new file mode 100644
index 0000000..a3290da
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_soc_util.h
@@ -0,0 +1,621 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CAM_SOC_UTIL_H_
+#define _CAM_SOC_UTIL_H_
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk/qcom.h>
+#include <linux/debugfs.h>
+
+#include "cam_io_util.h"
+
+#define NO_SET_RATE  -1
+#define INIT_RATE    -2
+
+/* maximum number of device blocks */
+#define CAM_SOC_MAX_BLOCK           4
+
+/* maximum number of device bases */
+#define CAM_SOC_MAX_BASE            CAM_SOC_MAX_BLOCK
+
+/* maximum number of device regulators */
+#define CAM_SOC_MAX_REGULATOR       5
+
+/* maximum number of device clocks */
+#define CAM_SOC_MAX_CLK             32
+
+/**
+ * enum cam_vote_level - Enum for voting level
+ *
+ * @CAM_SUSPEND_VOTE  : Suspend vote
+ * @CAM_MINSVS_VOTE   : Min SVS vote
+ * @CAM_LOWSVS_VOTE   : Low SVS vote
+ * @CAM_SVS_VOTE      : SVS vote
+ * @CAM_SVSL1_VOTE    : SVS Plus vote
+ * @CAM_NOMINAL_VOTE  : Nominal vote
+ * @CAM_NOMINALL1_VOTE: Nominal plus vote
+ * @CAM_TURBO_VOTE    : Turbo vote
+ * @CAM_MAX_VOTE      : Max voting level; this is an invalid level used only as a bound.
+ */
+enum cam_vote_level {
+	CAM_SUSPEND_VOTE,
+	CAM_MINSVS_VOTE,
+	CAM_LOWSVS_VOTE,
+	CAM_SVS_VOTE,
+	CAM_SVSL1_VOTE,
+	CAM_NOMINAL_VOTE,
+	CAM_NOMINALL1_VOTE,
+	CAM_TURBO_VOTE,
+	CAM_MAX_VOTE,
+};
+
+/* pinctrl states */
+#define CAM_SOC_PINCTRL_STATE_SLEEP "cam_suspend"
+#define CAM_SOC_PINCTRL_STATE_DEFAULT "cam_default"
+
+/**
+ * struct cam_soc_reg_map:   Information about the mapped register space
+ *
+ * @mem_base:               Starting location of MAPPED register space
+ * @mem_cam_base:           Starting offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @size:                   Size of register space
+ **/
+struct cam_soc_reg_map {
+	void __iomem                   *mem_base;
+	uint32_t                        mem_cam_base;
+	resource_size_t                 size;
+};
+
+/**
+ * struct cam_soc_pinctrl_info:   Information about pinctrl data
+ *
+ * @pinctrl:               pinctrl object
+ * @gpio_state_active:     default/active pinctrl state
+ * @gpio_state_suspend:    suspend state of pinctrl
+ **/
+struct cam_soc_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+};
+
+/**
+ * struct cam_soc_gpio_data:   Information about the gpio pins
+ *
+ * @cam_gpio_common_tbl:       List of all the gpios present in the gpios
+ *                             node in DTSI
+ * @cam_gpio_common_tbl_size:  Number of gpios present in the gpios node
+ * @cam_gpio_req_tbl:          List of all the requested gpios
+ * @cam_gpio_req_tbl_size:     Number of requested gpios
+ **/
+struct cam_soc_gpio_data {
+	struct gpio *cam_gpio_common_tbl;
+	uint8_t cam_gpio_common_tbl_size;
+	struct gpio *cam_gpio_req_tbl;
+	uint8_t cam_gpio_req_tbl_size;
+};
+
+/**
+ * struct cam_hw_soc_info:  Soc information pertaining to specific instance of
+ *                          Camera hardware driver module
+ *
+ * @pdev:                   Platform device pointer
+ * @dev:                    Device pointer
+ * @hw_version:             Camera device version
+ * @index:                  Instance id for the camera device
+ * @dev_name:               Device Name
+ * @irq_name:               Name of the irq associated with the device
+ * @irq_line:               Irq resource
+ * @irq_data:               Private data that is passed when IRQ is requested
+ * @num_mem_block:          Number of entries in "reg-names"
+ * @mem_block_name:         Array of reg block names
+ * @mem_block_cam_base:     Array of offset of this register space compared
+ *                          to ENTIRE Camera register space
+ * @mem_block:              Associated resource structs
+ * @reg_map:                Array of Mapped register info for the "reg-names"
+ * @num_reg_map:            Number of mapped register spaces associated
+ *                          with mem_block. num_reg_map = num_mem_block in
+ *                          most cases
+ * @reserve_mem:            Whether to reserve memory for Mem blocks
+ * @num_rgltr:              Number of regulators
+ * @rgltr_name:             Array of regulator names
+ * @rgltr_ctrl_support:     Whether regulator control is supported
+ * @rgltr_min_volt:         Array of minimum regulator voltage
+ * @rgltr_max_volt:         Array of maximum regulator voltage
+ * @rgltr_op_mode:          Array of regulator operation mode
+ * @rgltr_type:             Array of regulator types
+ * @rgltr:                  Array of associated regulator resources
+ * @rgltr_delay:            Array of regulator delay values
+ * @use_shared_clk:         Whether shared clocks are used for this device
+ * @num_clk:                Number of clocks
+ * @clk_name:               Array of clock names
+ * @clk:                    Array of associated clock resources
+ * @clk_rate:               2D array of clock rates representing clock rate
+ *                          values at different vote levels
+ * @src_clk_idx:            Source clock index that is rate-controllable
+ * @clk_level_valid:        Indicates whether corresponding level is valid
+ * @gpio_data:              Pointer to gpio info
+ * @pinctrl_info:           Pinctrl info
+ * @dentry:                 Debugfs entry
+ * @clk_level_override:     Clk level set from debugfs
+ * @clk_control_enable:     Enable/disable clk rate control through debugfs
+ * @soc_private:            Soc private data
+ */
+struct cam_hw_soc_info {
+	struct platform_device         *pdev;
+	struct device                  *dev;
+	uint32_t                        hw_version;
+	uint32_t                        index;
+	const char                     *dev_name;
+	const char                     *irq_name;
+	struct resource                *irq_line;
+	void                           *irq_data;
+
+	uint32_t                        num_mem_block;
+	const char                     *mem_block_name[CAM_SOC_MAX_BLOCK];
+	uint32_t                        mem_block_cam_base[CAM_SOC_MAX_BLOCK];
+	struct resource                *mem_block[CAM_SOC_MAX_BLOCK];
+	struct cam_soc_reg_map          reg_map[CAM_SOC_MAX_BASE];
+	uint32_t                        num_reg_map;
+	uint32_t                        reserve_mem;
+
+	uint32_t                        num_rgltr;
+	const char                     *rgltr_name[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_ctrl_support;
+	uint32_t                        rgltr_min_volt[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_max_volt[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_op_mode[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_type[CAM_SOC_MAX_REGULATOR];
+	struct regulator               *rgltr[CAM_SOC_MAX_REGULATOR];
+	uint32_t                        rgltr_delay[CAM_SOC_MAX_REGULATOR];
+
+	uint32_t                        use_shared_clk;
+	uint32_t                        num_clk;
+	const char                     *clk_name[CAM_SOC_MAX_CLK];
+	struct clk                     *clk[CAM_SOC_MAX_CLK];
+	int32_t                         clk_rate[CAM_MAX_VOTE][CAM_SOC_MAX_CLK];
+	int32_t                         src_clk_idx;
+	bool                            clk_level_valid[CAM_MAX_VOTE];
+
+	struct cam_soc_gpio_data       *gpio_data;
+	struct cam_soc_pinctrl_info     pinctrl_info;
+
+	struct dentry                  *dentry;
+	uint32_t                        clk_level_override;
+	bool                            clk_control_enable;
+
+	void                           *soc_private;
+};
+
+/*
+ * CAM_SOC_GET_REG_MAP_START
+ *
+ * @brief:              This MACRO will get the mapped starting address
+ *                      where the register space can be accessed
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a pointer to the mapped register memory
+ */
+#define CAM_SOC_GET_REG_MAP_START(__soc_info, __base_index)          \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		NULL : __soc_info->reg_map[__base_index].mem_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_CAM_BASE
+ *
+ * @brief:              This MACRO will get the cam_base of the
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns an int32_t value.
+ *                        Failure: -1
+ *                        Success: Starting offset of register space compared
+ *                                 to entire Camera Register Map
+ */
+#define CAM_SOC_GET_REG_MAP_CAM_BASE(__soc_info, __base_index)       \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		-1 : __soc_info->reg_map[__base_index].mem_cam_base)
+
+/*
+ * CAM_SOC_GET_REG_MAP_SIZE
+ *
+ * @brief:              This MACRO will get the size of the mapped
+ *                      register space
+ *
+ * @__soc_info:         Device soc information
+ * @__base_index:       Index of register space in the HW block
+ *
+ * @return:             Returns a uint32_t value.
+ *                        Failure: 0
+ *                        Success: Non-zero size of mapped register space
+ */
+#define CAM_SOC_GET_REG_MAP_SIZE(__soc_info, __base_index)           \
+	((!__soc_info || __base_index >= __soc_info->num_reg_map) ?  \
+		0 : __soc_info->reg_map[__base_index].size)
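+
+/*
+ * Illustrative sketch (not part of the original patch): a driver would
+ * typically combine these macros to bounds-check an access into a mapped
+ * register space. The base index and register offset below are hypothetical.
+ *
+ *	void __iomem *base = CAM_SOC_GET_REG_MAP_START(soc_info, 0);
+ *	resource_size_t size = CAM_SOC_GET_REG_MAP_SIZE(soc_info, 0);
+ *	uint32_t val;
+ *
+ *	if (base && (0x40 + sizeof(uint32_t)) <= size)
+ *		val = cam_io_r(base + 0x40);
+ */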
+
+/**
+ * cam_soc_util_get_level_from_string()
+ *
+ * @brief:              Get the associated vote level for the input string
+ *
+ * @string:             Input string to compare with.
+ * @level:              Vote level corresponding to the input string.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_get_level_from_string(const char *string,
+	enum cam_vote_level *level);
+
+/**
+ * cam_soc_util_get_dt_properties()
+ *
+ * @brief:              Parse the DT and populate the common properties that
+ *                      are part of the soc_info structure - register map,
+ *                      clocks, regulators, irq, etc.
+ *
+ * @soc_info:           Device soc struct to be populated
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_get_dt_properties(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_request_platform_resource()
+ *
+ * @brief:              Request regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ * @handler:            Irq handler function pointer
+ * @irq_data:           Irq handler function CB data
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_request_platform_resource(struct cam_hw_soc_info *soc_info,
+	irq_handler_t handler, void *irq_data);
+
+/**
+ * cam_soc_util_release_platform_resource()
+ *
+ * @brief:              Release regulator, irq, and clock resources
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_release_platform_resource(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_enable_platform_resource()
+ *
+ * @brief:              Enable regulator, clock, and irq resources
+ *
+ * @soc_info:           Device soc information
+ * @enable_clocks:      Boolean flag:
+ *                          true:  Enable all clocks in soc_info now.
+ *                          false: Don't enable clocks now. The driver will
+ *                                 enable them independently.
+ * @clk_level:          Clock level to be applied.
+ *                      Applicable only if enable_clocks is true.
+ *                          Valid range: 0 to (CAM_MAX_VOTE - 1)
+ * @enable_irq:         Boolean flag:
+ *                          true:  Enable IRQ in soc_info now.
+ *                          false: Don't enable IRQ now. The driver will
+ *                                 enable it independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_enable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool enable_clocks, enum cam_vote_level clk_level, bool enable_irq);
+
+/**
+ * cam_soc_util_disable_platform_resource()
+ *
+ * @brief:              Disable regulator, clock, and irq resources
+ *
+ * @soc_info:           Device soc information
+ * @disable_clocks:     Boolean flag:
+ *                          true:  Disable all clocks in soc_info now.
+ *                          false: Don't disable clocks now. The driver will
+ *                                 disable them independently.
+ * @disable_irq:        Boolean flag:
+ *                          true:  Disable IRQ in soc_info now.
+ *                          false: Don't disable IRQ now. The driver will
+ *                                 disable it independently.
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_disable_platform_resource(struct cam_hw_soc_info *soc_info,
+	bool disable_clocks, bool disable_irq);
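+
+/*
+ * Illustrative lifecycle sketch (not part of the original patch): a camera
+ * HW driver typically pairs the request/enable and disable/release calls
+ * as below. Error handling is omitted; my_irq_handler and priv are
+ * hypothetical names.
+ *
+ *	cam_soc_util_get_dt_properties(soc_info);
+ *	cam_soc_util_request_platform_resource(soc_info, my_irq_handler, priv);
+ *	cam_soc_util_enable_platform_resource(soc_info, true, CAM_SVS_VOTE, true);
+ *	...
+ *	cam_soc_util_disable_platform_resource(soc_info, true, true);
+ *	cam_soc_util_release_platform_resource(soc_info);
+ */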
+
+/**
+ * cam_soc_util_get_clk_round_rate()
+ *
+ * @brief:              Get the rounded clock rate for the given clock and
+ *                      requested clock rate value
+ *
+ * @soc_info:           Device soc information
+ * @clk_index:          Clock index in soc_info for which round rate is needed
+ * @clk_rate:           Input clock rate for which rounded rate is needed
+ *
+ * @return:             Rounded clock rate
+ */
+long cam_soc_util_get_clk_round_rate(struct cam_hw_soc_info *soc_info,
+	uint32_t clk_index, unsigned long clk_rate);
+
+/**
+ * cam_soc_util_set_clk_flags()
+ *
+ * @brief:              Camera SOC util to set the flags for a specified clock
+ *
+ * @soc_info:           Device soc information
+ * @clk_index:          Clock index in soc_info for which flags are to be set
+ * @flags:              Flags to set
+ *
+ * @return:             Success or Failure
+ */
+int cam_soc_util_set_clk_flags(struct cam_hw_soc_info *soc_info,
+	 uint32_t clk_index, unsigned long flags);
+
+/**
+ * cam_soc_util_set_src_clk_rate()
+ *
+ * @brief:              Set the rate on the source clock.
+ *
+ * @soc_info:           Device soc information
+ * @clk_rate:           Clock rate associated with the src clk
+ *
+ * @return:             success or failure
+ */
+int cam_soc_util_set_src_clk_rate(struct cam_hw_soc_info *soc_info,
+	int32_t clk_rate);
+
+/**
+ * cam_soc_util_get_option_clk_by_name()
+ *
+ * @brief:              Get reference to optional clk using name
+ *
+ * @soc_info:           Device soc information
+ * @clk_name:           Name of clock to find reference for
+ * @clk:                Clock reference pointer to be filled if Success
+ * @clk_index:          Clk index in the optional clk array, returned on success
+ * @clk_rate:           Clk rate of the optional clk
+ *
+ * @return:             0: Success
+ *                      Negative: Failure
+ */
+int cam_soc_util_get_option_clk_by_name(struct cam_hw_soc_info *soc_info,
+	const char *clk_name, struct clk **clk, int32_t *clk_index,
+	int32_t *clk_rate);
+
+/**
+ * cam_soc_util_clk_put()
+ *
+ * @brief:              Put clock specified in params
+ *
+ * @clk:                Reference to the Clock that needs to be put
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_put(struct clk **clk);
+
+/**
+ * cam_soc_util_clk_enable()
+ *
+ * @brief:              Enable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned ON
+ * @clk_name:           Clock name associated with clk
+ * @clk_rate:           Clock rate associated with clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_enable(struct clk *clk, const char *clk_name,
+	int32_t clk_rate);
+
+/**
+ * cam_soc_util_set_clk_rate_level()
+ *
+ * @brief:              Apply the clock rates for the requested level.
+ *                      This applies the rate configured for the new level
+ *                      to every clock listed in DT.
+ *
+ * @soc_info:           Device soc information
+ * @clk_level:          Clock level number to set
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_set_clk_rate_level(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level);
+
+/**
+ * cam_soc_util_clk_disable()
+ *
+ * @brief:              Disable clock specified in params
+ *
+ * @clk:                Clock that needs to be turned OFF
+ * @clk_name:           Clock name associated with clk
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_clk_disable(struct clk *clk, const char *clk_name);
+
+/**
+ * cam_soc_util_irq_enable()
+ *
+ * @brief:              Enable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_enable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_irq_disable()
+ *
+ * @brief:              Disable IRQ in SOC
+ *
+ * @soc_info:           Device soc information
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_irq_disable(struct cam_hw_soc_info *soc_info);
+
+/**
+ * cam_soc_util_regulator_enable()
+ *
+ * @brief:              Enable single regulator
+ *
+ * @rgltr:              Regulator that needs to be turned ON
+ * @rgltr_name:         Associated regulator name
+ * @rgltr_min_volt:     Requested minimum voltage
+ * @rgltr_max_volt:     Requested maximum voltage
+ * @rgltr_op_mode:      Requested load
+ * @rgltr_delay:        Requested delay needed after enabling the regulator
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_regulator_enable(struct regulator *rgltr,
+	const char *rgltr_name,
+	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+	uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+/**
+ * cam_soc_util_regulator_disable()
+ *
+ * @brief:              Disable single regulator
+ *
+ * @rgltr:              Regulator that needs to be turned OFF
+ * @rgltr_name:         Associated regulator name
+ * @rgltr_min_volt:     Requested minimum voltage
+ * @rgltr_max_volt:     Requested maximum voltage
+ * @rgltr_op_mode:      Requested load
+ * @rgltr_delay:        Requested delay needed after disabling the regulator
+ *
+ * @return:             Success or failure
+ */
+int cam_soc_util_regulator_disable(struct regulator *rgltr,
+	const char *rgltr_name,
+	uint32_t rgltr_min_volt, uint32_t rgltr_max_volt,
+	uint32_t rgltr_op_mode, uint32_t rgltr_delay);
+
+
+ * cam_soc_util_w()
+ *
+ * @brief:              Camera SOC util for register write
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be written
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return -EINVAL;
+	return cam_io_w(data,
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_w_mb()
+ *
+ * @brief:              Camera SOC util for register write with memory barrier.
+ *                      The memory barrier is issued only before the write to
+ *                      ensure ordering. If this write also needs to be
+ *                      flushed, call wmb() independently in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be written
+ * @data:               Value to be written
+ *
+ * @return:             Success or Failure
+ */
+static inline int cam_soc_util_w_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, uint32_t data)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return -EINVAL;
+	return cam_io_w_mb(data,
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r()
+ *
+ * @brief:              Camera SOC util for register read
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return 0;
+	return cam_io_r(
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
+
+/**
+ * cam_soc_util_r_mb()
+ *
+ * @brief:              Camera SOC util for register read with memory barrier.
+ *                      The memory barrier is issued only before the read to
+ *                      ensure ordering. If this read also needs to be
+ *                      ordered against later accesses, call rmb()
+ *                      independently in the caller.
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Offset of register to be read
+ *
+ * @return:             Value read from the register address
+ */
+static inline uint32_t cam_soc_util_r_mb(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset)
+{
+	if (!CAM_SOC_GET_REG_MAP_START(soc_info, base_index))
+		return 0;
+	return cam_io_r_mb(
+		CAM_SOC_GET_REG_MAP_START(soc_info, base_index) + offset);
+}
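+
+/*
+ * Illustrative sketch (not part of the original patch): a read-modify-write
+ * sequence built from the helpers above. The base index, offset, and bit
+ * position are hypothetical.
+ *
+ *	uint32_t val = cam_soc_util_r(soc_info, 0, 0x10);
+ *
+ *	val |= BIT(0);
+ *	cam_soc_util_w_mb(soc_info, 0, 0x10, val);
+ */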
+
+/**
+ * cam_soc_util_reg_dump()
+ *
+ * @brief:              Camera SOC util for dumping a range of registers
+ *
+ * @soc_info:           Device soc information
+ * @base_index:         Index of register space in the HW block
+ * @offset:             Start register offset for the dump
+ * @size:               Size specifying the range for dump
+ *
+ * @return:             Success or Failure
+ */
+int cam_soc_util_reg_dump(struct cam_hw_soc_info *soc_info,
+	uint32_t base_index, uint32_t offset, int size);
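+
+/*
+ * Example (illustrative only, hypothetical offsets): dump the first 0x100
+ * bytes of register space 0 when debugging.
+ *
+ *	cam_soc_util_reg_dump(soc_info, 0, 0x0, 0x100);
+ */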
+
+/* Disable all default clocks listed in soc_info */
+void cam_soc_util_clk_disable_default(struct cam_hw_soc_info *soc_info);
+
+/* Enable all default clocks listed in soc_info at the given vote level */
+int cam_soc_util_clk_enable_default(struct cam_hw_soc_info *soc_info,
+	enum cam_vote_level clk_level);
+
+#endif /* _CAM_SOC_UTIL_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.c b/drivers/media/platform/msm/camera/cam_utils/cam_trace.c
new file mode 100644
index 0000000..d287526
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+/* Instantiate tracepoints */
+#define CREATE_TRACE_POINTS
+#include "cam_trace.h"
diff --git a/drivers/media/platform/msm/camera/cam_utils/cam_trace.h b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
new file mode 100644
index 0000000..8d96d52
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_utils/cam_trace.h
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+
+#if !defined(_CAM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _CAM_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM camera
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cam_trace
+
+#include <linux/tracepoint.h>
+#include <media/cam_req_mgr.h>
+#include "cam_req_mgr_core.h"
+#include "cam_req_mgr_interface.h"
+#include "cam_context.h"
+
+TRACE_EVENT(cam_context_state,
+	TP_PROTO(const char *name, struct cam_context *ctx),
+	TP_ARGS(name, ctx),
+	TP_STRUCT__entry(
+		__field(void*, ctx)
+		__field(uint32_t, state)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->ctx = ctx;
+		__entry->state = ctx->state;
+		__assign_str(name, name);
+	),
+	TP_printk(
+		"%s: State ctx=%p ctx_state=%u",
+			__get_str(name), __entry->ctx, __entry->state
+	)
+);
+
+TRACE_EVENT(cam_isp_activated_irq,
+	TP_PROTO(struct cam_context *ctx, unsigned int substate,
+		unsigned int event, uint64_t timestamp),
+	TP_ARGS(ctx, substate, event, timestamp),
+	TP_STRUCT__entry(
+		__field(void*, ctx)
+		__field(uint32_t, state)
+		__field(uint32_t, substate)
+		__field(uint32_t, event)
+		__field(uint64_t, ts)
+	),
+	TP_fast_assign(
+		__entry->ctx = ctx;
+		__entry->state = ctx->state;
+		__entry->substate = substate;
+		__entry->event = event;
+		__entry->ts = timestamp;
+	),
+	TP_printk(
+		"ISP: IRQ ctx=%p ctx_state=%u substate=%u event=%u ts=%llu",
+			__entry->ctx, __entry->state, __entry->substate,
+			__entry->event, __entry->ts
+	)
+);
+
+TRACE_EVENT(cam_icp_fw_dbg,
+	TP_PROTO(char *dbg_message, uint64_t timestamp),
+	TP_ARGS(dbg_message, timestamp),
+	TP_STRUCT__entry(
+		__string(dbg_message, dbg_message)
+		__field(uint64_t, timestamp)
+	),
+	TP_fast_assign(
+		__assign_str(dbg_message, dbg_message);
+		__entry->timestamp = timestamp;
+	),
+	TP_printk(
+		"%llu %s: ",
+		 __entry->timestamp, __get_str(dbg_message)
+	)
+);
+
+TRACE_EVENT(cam_buf_done,
+	TP_PROTO(const char *ctx_type, struct cam_context *ctx,
+		struct cam_ctx_request *req),
+	TP_ARGS(ctx_type, ctx, req),
+	TP_STRUCT__entry(
+		__string(ctx_type, ctx_type)
+		__field(void*, ctx)
+		__field(uint64_t, request)
+	),
+	TP_fast_assign(
+		__assign_str(ctx_type, ctx_type);
+		__entry->ctx = ctx;
+		__entry->request = req->request_id;
+	),
+	TP_printk(
+		"%5s: BufDone ctx=%p request=%llu",
+			__get_str(ctx_type), __entry->ctx, __entry->request
+	)
+);
+
+TRACE_EVENT(cam_apply_req,
+	TP_PROTO(const char *entity, uint64_t req_id),
+	TP_ARGS(entity, req_id),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint64_t, req_id)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->req_id = req_id;
+	),
+	TP_printk(
+		"%8s: ApplyRequest request=%llu",
+			__get_str(entity), __entry->req_id
+	)
+);
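+
+/*
+ * Illustrative note (not part of the original patch): each TRACE_EVENT()
+ * above generates a trace_<name>() helper that drivers call at the
+ * corresponding point, e.g.:
+ *
+ *	trace_cam_apply_req("CRM", req->request_id);
+ */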
+
+TRACE_EVENT(cam_flush_req,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_flush_info *info),
+	TP_ARGS(link, info),
+	TP_STRUCT__entry(
+		__field(uint32_t, type)
+		__field(int64_t, req_id)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__entry->type    = info->flush_type;
+		__entry->req_id  = info->req_id;
+		__entry->link    = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"FlushRequest type=%u request=%llu link=%pK session=%pK",
+			__entry->type, __entry->req_id, __entry->link,
+			__entry->session
+	)
+);
+
+TRACE_EVENT(cam_req_mgr_connect_device,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_device_info *info),
+	TP_ARGS(link, info),
+	TP_STRUCT__entry(
+		__string(name, info->name)
+		__field(uint32_t, id)
+		__field(uint32_t, delay)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, info->name);
+		__entry->id      = info->dev_id;
+		__entry->delay   = info->p_delay;
+		__entry->link    = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"ReqMgr Connect name=%s id=%u pd=%d link=%pK session=%pK",
+			__get_str(name), __entry->id, __entry->delay,
+			__entry->link, __entry->session
+	)
+);
+
+TRACE_EVENT(cam_req_mgr_apply_request,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		struct cam_req_mgr_apply_request *req,
+		struct cam_req_mgr_connected_device *dev),
+	TP_ARGS(link, req, dev),
+	TP_STRUCT__entry(
+		__string(name, dev->dev_info.name)
+		__field(uint32_t, dev_id)
+		__field(uint64_t, req_id)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev->dev_info.name);
+		__entry->dev_id  = dev->dev_info.dev_id;
+		__entry->req_id  = req->request_id;
+		__entry->link    = link;
+		__entry->session = link->parent;
+	),
+	TP_printk(
+		"ReqMgr ApplyRequest devname=%s devid=%u request=%lld link=%pK session=%pK",
+			__get_str(name), __entry->dev_id, __entry->req_id,
+			__entry->link, __entry->session
+	)
+);
+
+TRACE_EVENT(cam_req_mgr_add_req,
+	TP_PROTO(struct cam_req_mgr_core_link *link,
+		int idx, struct cam_req_mgr_add_request *add_req,
+		struct cam_req_mgr_req_tbl *tbl,
+		struct cam_req_mgr_connected_device *dev),
+	TP_ARGS(link, idx, add_req, tbl, dev),
+	TP_STRUCT__entry(
+		__string(name, dev->dev_info.name)
+		__field(uint32_t, dev_id)
+		__field(uint64_t, req_id)
+		__field(uint32_t, slot_id)
+		__field(uint32_t, delay)
+		__field(uint32_t, readymap)
+		__field(uint32_t, devicemap)
+		__field(void*, link)
+		__field(void*, session)
+	),
+	TP_fast_assign(
+		__assign_str(name, dev->dev_info.name);
+		__entry->dev_id    = dev->dev_info.dev_id;
+		__entry->req_id    = add_req->req_id;
+		__entry->slot_id   = idx;
+		__entry->delay     = tbl->pd;
+		__entry->readymap  = tbl->slot[idx].req_ready_map;
+		__entry->devicemap = tbl->dev_mask;
+		__entry->link      = link;
+		__entry->session   = link->parent;
+	),
+	TP_printk(
+		"ReqMgr AddRequest devname=%s devid=%d request=%lld slot=%d pd=%d readymap=%x devicemap=%d link=%pK session=%pK",
+			__get_str(name), __entry->dev_id, __entry->req_id,
+			__entry->slot_id, __entry->delay, __entry->readymap,
+			__entry->devicemap, __entry->link, __entry->session
+	)
+);
+
+TRACE_EVENT(cam_submit_to_hw,
+	TP_PROTO(const char *entity, uint64_t req_id),
+	TP_ARGS(entity, req_id),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint64_t, req_id)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->req_id = req_id;
+	),
+	TP_printk(
+		"%8s: submit request=%llu",
+			__get_str(entity), __entry->req_id
+	)
+);
+
+TRACE_EVENT(cam_irq_activated,
+	TP_PROTO(const char *entity, uint32_t irq_type),
+	TP_ARGS(entity, irq_type),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, irq_type)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->irq_type = irq_type;
+	),
+	TP_printk(
+		"%8s: got irq type=%d",
+			__get_str(entity), __entry->irq_type
+	)
+);
+
+TRACE_EVENT(cam_irq_handled,
+	TP_PROTO(const char *entity, uint32_t irq_type),
+	TP_ARGS(entity, irq_type),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, irq_type)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->irq_type = irq_type;
+	),
+	TP_printk(
+		"%8s: handled irq type=%d",
+			__get_str(entity), __entry->irq_type
+	)
+);
+
+TRACE_EVENT(cam_cdm_cb,
+	TP_PROTO(const char *entity, uint32_t status),
+	TP_ARGS(entity, status),
+	TP_STRUCT__entry(
+		__string(entity, entity)
+		__field(uint32_t, status)
+	),
+	TP_fast_assign(
+		__assign_str(entity, entity);
+		__entry->status = status;
+	),
+	TP_printk(
+		"%8s: cdm cb status=%d",
+			__get_str(entity), __entry->status
+	)
+);
+
+#endif /* _CAM_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index c46422a..a5176d9 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1371,7 +1371,7 @@
 		goto rotator_open_error;
 	}
 
-	ctx->slice = llcc_slice_getd(rot_dev->dev, "rotator");
+	ctx->slice = llcc_slice_getd(LLCC_ROTATOR);
 	if (IS_ERR(ctx->slice)) {
 		rc = PTR_ERR(ctx->slice);
 		SDEROT_ERR("failed to get system cache %d\n", rc);
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
index 39c36ff..81188e4 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -96,6 +96,8 @@
 	void *priv_handle;
 };
 
+#if defined(CONFIG_MSM_SDE_ROTATOR)
+
 void *sde_rotator_inline_open(struct platform_device *pdev);
 int sde_rotator_inline_get_dst_pixfmt(struct platform_device *pdev,
 		u32 src_pixfmt, u32 *dst_pixfmt);
@@ -109,4 +111,51 @@
 int sde_rotator_inline_release(void *handle);
 void sde_rotator_inline_reg_dump(struct platform_device *pdev);
 
+#else
+
+static inline void *sde_rotator_inline_open(struct platform_device *pdev)
+{
+	return NULL;
+}
+
+static inline int sde_rotator_inline_get_dst_pixfmt(
+		struct platform_device *pdev, u32 src_pixfmt, u32 *dst_pixfmt)
+{
+	return 0;
+}
+
+static inline int sde_rotator_inline_get_downscale_caps(
+		struct platform_device *pdev, char *downscale_caps, int len)
+{
+	return 0;
+}
+
+static inline int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline int sde_rotator_inline_get_pixfmt_caps(
+		struct platform_device *pdev, bool input, u32 *pixfmt, int len)
+{
+	return 0;
+}
+
+static inline int sde_rotator_inline_commit(void *handle,
+		struct sde_rotator_inline_cmd *cmd, enum sde_rotator_inline_cmd_type cmd_type)
+{
+	return 0;
+}
+
+static inline int sde_rotator_inline_release(void *handle)
+{
+	return 0;
+}
+
+static inline void sde_rotator_inline_reg_dump(struct platform_device *pdev)
+{
+}
+
+#endif
+
 #endif /* __SDE_ROTATOR_INLINE_H__ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 5f4e447..eedcdf6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * RMNET configuration engine
  *
  */
@@ -21,6 +13,9 @@
 #include "rmnet_handlers.h"
 #include "rmnet_vnd.h"
 #include "rmnet_private.h"
+#include "rmnet_map.h"
+#include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
 
 /* Locking scheme -
  * The shared resource which needs to be protected is realdev->rx_handler_data.
@@ -43,15 +38,17 @@
 
 /* Local Definitions and Declarations */
 
-static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
+static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 2] = {
 	[IFLA_RMNET_MUX_ID]	= { .type = NLA_U16 },
 	[IFLA_RMNET_FLAGS]	= { .len = sizeof(struct ifla_rmnet_flags) },
+	[IFLA_VLAN_EGRESS_QOS]	= { .len = sizeof(struct tcmsg) },
 };
 
-static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
+int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 {
 	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
 }
+EXPORT_SYMBOL(rmnet_is_real_dev_registered);
 
 /* Needs rtnl lock */
 static struct rmnet_port*
@@ -66,6 +63,9 @@
 	if (port->nr_rmnet_devs)
 		return -EINVAL;
 
+	rmnet_map_cmd_exit(port);
+	rmnet_map_tx_aggregate_exit(port);
+
 	kfree(port);
 
 	netdev_rx_handler_unregister(real_dev);
@@ -97,13 +97,15 @@
 		kfree(port);
 		return -EBUSY;
 	}
-
 	/* hold on to real dev for MAP data */
 	dev_hold(real_dev);
 
 	for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
 		INIT_HLIST_HEAD(&port->muxed_ep[entry]);
 
+	rmnet_map_tx_aggregate_init(port);
+	rmnet_map_cmd_init(port);
+
 	netdev_dbg(real_dev, "registered with rmnet\n");
 	return 0;
 }
@@ -211,6 +213,7 @@
 		hlist_del_init_rcu(&ep->hlnode);
 		rmnet_unregister_bridge(dev, port);
 		rmnet_vnd_dellink(mux_id, port, ep);
+		synchronize_rcu();
 		kfree(ep);
 	}
 	rmnet_unregister_real_device(real_dev, port);
@@ -234,7 +237,6 @@
 
 	port = rmnet_get_port_rtnl(dev);
 
-	rcu_read_lock();
 	rmnet_unregister_bridge(dev, port);
 
 	hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
@@ -242,12 +244,14 @@
 		rmnet_vnd_dellink(ep->mux_id, port, ep);
 
 		hlist_del_init_rcu(&ep->hlnode);
+		synchronize_rcu();
 		kfree(ep);
 	}
 
-	rcu_read_unlock();
 	unregister_netdevice_many(&list);
 
+	qmi_rmnet_qmi_exit(port->qmi_info, port);
+
 	rmnet_unregister_real_device(real_dev, port);
 }
 
@@ -281,12 +285,15 @@
 {
 	u16 mux_id;
 
-	if (!data || !data[IFLA_RMNET_MUX_ID])
+	if (!data)
 		return -EINVAL;
-
-	mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
-	if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
-		return -ERANGE;
+
+	if (data[IFLA_RMNET_MUX_ID]) {
+		mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+		if (mux_id > (RMNET_MAX_LOGICAL_EP - 1))
+			return -ERANGE;
+	}
 
 	return 0;
 }
@@ -329,6 +336,13 @@
 		port->data_format = flags->flags & flags->mask;
 	}
 
+	if (data[IFLA_VLAN_EGRESS_QOS]) {
+		struct tcmsg *tcm;
+
+		tcm = nla_data(data[IFLA_VLAN_EGRESS_QOS]);
+		qmi_rmnet_change_link(dev, port, tcm);
+	}
+
 	return 0;
 }
 
@@ -338,7 +352,8 @@
 		/* IFLA_RMNET_MUX_ID */
 		nla_total_size(2) +
 		/* IFLA_RMNET_FLAGS */
-		nla_total_size(sizeof(struct ifla_rmnet_flags));
+		nla_total_size(sizeof(struct ifla_rmnet_flags)) +
+		nla_total_size(sizeof(struct tcmsg));
 }
 
 static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
@@ -393,6 +408,7 @@
 	else
 		return NULL;
 }
+EXPORT_SYMBOL(rmnet_get_port);
 
 struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
 {
@@ -405,6 +421,7 @@
 
 	return NULL;
 }
+EXPORT_SYMBOL(rmnet_get_endpoint);
 
 int rmnet_add_bridge(struct net_device *rmnet_dev,
 		     struct net_device *slave_dev,
@@ -459,6 +476,140 @@
 	return 0;
 }
 
+#ifdef CONFIG_QCOM_QMI_RMNET
+void *rmnet_get_qmi_pt(void *port)
+{
+	if (port)
+		return ((struct rmnet_port *)port)->qmi_info;
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_qmi_pt);
+
+void *rmnet_get_qos_pt(struct net_device *dev)
+{
+	struct rmnet_priv *priv;
+
+	if (dev) {
+		priv = netdev_priv(dev);
+		return rcu_dereference(priv->qos_info);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_qos_pt);
+
+void *rmnet_get_rmnet_port(struct net_device *dev)
+{
+	struct rmnet_priv *priv;
+
+	if (dev) {
+		priv = netdev_priv(dev);
+		return (void *)rmnet_get_port(priv->real_dev);
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_rmnet_port);
+
+struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id)
+{
+	struct rmnet_endpoint *ep;
+
+	if (port) {
+		ep = rmnet_get_endpoint((struct rmnet_port *)port, mux_id);
+		if (ep)
+			return ep->egress_dev;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_rmnet_dev);
+
+void rmnet_reset_qmi_pt(void *port)
+{
+	if (port)
+		((struct rmnet_port *)port)->qmi_info = NULL;
+}
+EXPORT_SYMBOL(rmnet_reset_qmi_pt);
+
+void rmnet_init_qmi_pt(void *port, void *qmi)
+{
+	if (port)
+		((struct rmnet_port *)port)->qmi_info = qmi;
+}
+EXPORT_SYMBOL(rmnet_init_qmi_pt);
+
+void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
+{
+	struct rmnet_priv *priv;
+	struct rmnet_pcpu_stats *ps;
+	unsigned int cpu, start;
+
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+
+	if (!port || !tx || !rx)
+		return;
+
+	*tx = 0;
+	*rx = 0;
+	hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) {
+		priv = netdev_priv(ep->egress_dev);
+		for_each_possible_cpu(cpu) {
+			ps = per_cpu_ptr(priv->pcpu_stats, cpu);
+			do {
+				start = u64_stats_fetch_begin_irq(&ps->syncp);
+				*tx += ps->stats.tx_pkts;
+				*rx += ps->stats.rx_pkts;
+			} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
+		}
+	}
+}
+EXPORT_SYMBOL(rmnet_get_packets);
+
+void  rmnet_set_powersave_format(void *port)
+{
+	if (!port)
+		return;
+	((struct rmnet_port *)port)->data_format |= RMNET_INGRESS_FORMAT_PS;
+}
+EXPORT_SYMBOL(rmnet_set_powersave_format);
+
+void  rmnet_clear_powersave_format(void *port)
+{
+	if (!port)
+		return;
+	((struct rmnet_port *)port)->data_format &= ~RMNET_INGRESS_FORMAT_PS;
+}
+EXPORT_SYMBOL(rmnet_clear_powersave_format);
+
+void rmnet_enable_all_flows(void *port)
+{
+	struct rmnet_endpoint *ep;
+	unsigned long bkt;
+
+	if (unlikely(!port))
+		return;
+
+	rcu_read_lock();
+	hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
+			  bkt, ep, hlnode) {
+		qmi_rmnet_enable_all_flows(ep->egress_dev);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL(rmnet_enable_all_flows);
+
+int rmnet_get_powersave_notif(void *port)
+{
+	if (!port)
+		return 0;
+	return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
+}
+EXPORT_SYMBOL(rmnet_get_powersave_notif);
+#endif
+
 /* Startup/Shutdown */
 
 static int __init rmnet_init(void)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 34ac45a..292d353 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -1,14 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * RMNET Data configuration engine
  *
  */
@@ -27,6 +19,20 @@
 	struct hlist_node hlnode;
 };
 
+struct rmnet_port_priv_stats {
+	u64 dl_hdr_last_seq;
+	u64 dl_hdr_last_bytes;
+	u64 dl_hdr_last_pkts;
+	u64 dl_hdr_last_flows;
+	u64 dl_hdr_count;
+	u64 dl_hdr_total_bytes;
+	u64 dl_hdr_total_pkts;
+	u64 dl_hdr_avg_bytes;
+	u64 dl_hdr_avg_pkts;
+	u64 dl_trl_last_seq;
+	u64 dl_trl_count;
+};
+
 /* One instance of this structure is instantiated for each real_dev associated
  * with rmnet.
  */
@@ -37,6 +43,27 @@
 	u8 rmnet_mode;
 	struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
 	struct net_device *bridge_ep;
+	void *rmnet_perf;
+
+	u16 egress_agg_size;
+	u16 egress_agg_count;
+
+	/* Protect aggregation related elements */
+	spinlock_t agg_lock;
+
+	struct sk_buff *agg_skb;
+	int agg_state;
+	u8 agg_count;
+	struct timespec agg_time;
+	struct timespec agg_last;
+	struct hrtimer hrtimer;
+
+	void *qmi_info;
+
+	/* dl marker elements */
+	struct list_head dl_list;
+	struct rmnet_port_priv_stats stats;
+	int dl_marker_flush;
 };
 
 extern struct rtnl_link_ops rmnet_link_ops;
@@ -72,8 +99,25 @@
 	struct rmnet_pcpu_stats __percpu *pcpu_stats;
 	struct gro_cells gro_cells;
 	struct rmnet_priv_stats stats;
+	void __rcu *qos_info;
 };
 
+enum rmnet_trace_func {
+	RMNET_MODULE,
+	NW_STACK_MODULE,
+};
+
+enum rmnet_trace_evt {
+	RMNET_DLVR_SKB,
+	RMNET_RCV_FROM_PND,
+	RMNET_TX_UL_PKT,
+	NW_STACK_DEV_Q_XMIT,
+	NW_STACK_NAPI_GRO_FLUSH,
+	NW_STACK_RX,
+	NW_STACK_TX,
+};
+
+int rmnet_is_real_dev_registered(const struct net_device *real_dev);
 struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
 struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
 int rmnet_add_bridge(struct net_device *rmnet_dev,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 11167ab..2b357e2 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * RMNET Data ingress/egress handler
  *
  */
@@ -16,19 +8,58 @@
 #include <linux/netdevice.h>
 #include <linux/netdev_features.h>
 #include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <net/sock.h>
+#include <linux/tracepoint.h>
 #include "rmnet_private.h"
 #include "rmnet_config.h"
 #include "rmnet_vnd.h"
 #include "rmnet_map.h"
 #include "rmnet_handlers.h"
+#ifdef CONFIG_QCOM_QMI_HELPERS
+#include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
+#endif
 
 #define RMNET_IP_VERSION_4 0x40
 #define RMNET_IP_VERSION_6 0x60
+#define CREATE_TRACE_POINTS
+#include <trace/events/rmnet.h>
+
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_err);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_shs_wq_err);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_perf_err);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_low);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_high);
+EXPORT_TRACEPOINT_SYMBOL(rmnet_err);
 
 /* Helper Functions */
 
-static void rmnet_set_skb_proto(struct sk_buff *skb)
+static int rmnet_check_skb_can_gro(struct sk_buff *skb)
+{
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			return 0;
+		break;
+	case htons(ETH_P_IPV6):
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			return 0;
+		break;
+	}
+
+	return -EPROTONOSUPPORT;
+}
+
+void rmnet_set_skb_proto(struct sk_buff *skb)
 {
 	switch (skb->data[0] & 0xF0) {
 	case RMNET_IP_VERSION_4:
@@ -42,22 +73,55 @@
 		break;
 	}
 }
+EXPORT_SYMBOL(rmnet_set_skb_proto);
+
+/* Shs hook handler */
+
+int (*rmnet_shs_skb_entry)(struct sk_buff *skb,
+			   struct rmnet_port *port) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_skb_entry);
 
 /* Generic handler */
 
-static void
-rmnet_deliver_skb(struct sk_buff *skb)
+void
+rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port)
 {
+	int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
 	struct rmnet_priv *priv = netdev_priv(skb->dev);
 
+	trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
+			0xDEF, 0xDEF, (void *)skb, NULL);
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
-	rmnet_vnd_rx_fixup(skb, skb->dev);
+	rmnet_vnd_rx_fixup(skb->dev, skb->len);
 
 	skb->pkt_type = PACKET_HOST;
 	skb_set_mac_header(skb, 0);
-	gro_cells_receive(&priv->gro_cells, skb);
+
+	rmnet_shs_stamp = rcu_dereference(rmnet_shs_skb_entry);
+	if (rmnet_shs_stamp) {
+		rmnet_shs_stamp(skb, port);
+		return;
+	}
+
+	if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+		if (!rmnet_check_skb_can_gro(skb) &&
+		    port->dl_marker_flush >= 0) {
+			struct napi_struct *napi = get_current_napi_context();
+
+			napi_gro_receive(napi, skb);
+			port->dl_marker_flush++;
+		} else {
+			netif_receive_skb(skb);
+		}
+	} else {
+		if (!rmnet_check_skb_can_gro(skb))
+			gro_cells_receive(&priv->gro_cells, skb);
+		else
+			netif_receive_skb(skb);
+	}
 }
+EXPORT_SYMBOL(rmnet_deliver_skb);
 
 /* MAP handler */
 
@@ -70,6 +134,11 @@
 	u8 mux_id;
 
 	if (RMNET_MAP_GET_CD_BIT(skb)) {
+		if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+			if (!rmnet_map_flow_command(skb, port, false))
+				return;
+		}
+
 		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
 			return rmnet_map_command(skb, port);
 
@@ -98,19 +167,27 @@
 			skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
 
+#ifdef CONFIG_QCOM_QMI_HELPERS
+	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+		qmi_rmnet_work_maybe_restart(port);
+#endif
+
 	skb_trim(skb, len);
-	rmnet_deliver_skb(skb);
+	rmnet_deliver_skb(skb, port);
 	return;
 
 free_skb:
 	kfree_skb(skb);
 }
 
+int (*rmnet_perf_deag_entry)(struct sk_buff *skb,
+			     struct rmnet_port *port) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_perf_deag_entry);
+
 static void
 rmnet_map_ingress_handler(struct sk_buff *skb,
 			  struct rmnet_port *port)
 {
-	struct sk_buff *skbn;
 
 	if (skb->dev->type == ARPHRD_ETHER) {
 		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
@@ -122,10 +199,30 @@
 	}
 
 	if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
-		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
-			__rmnet_map_ingress_handler(skbn, port);
+		int (*rmnet_perf_core_deaggregate)(struct sk_buff *skb,
+						   struct rmnet_port *port);
+		/* Deaggregation and freeing of HW-originated
+		 * buffers are handled within this call.
+		 */
+		rmnet_perf_core_deaggregate =
+					rcu_dereference(rmnet_perf_deag_entry);
+		if (rmnet_perf_core_deaggregate) {
+			rmnet_perf_core_deaggregate(skb, port);
+		} else {
+			struct sk_buff *skbn;
 
-		consume_skb(skb);
+			while (skb) {
+				struct sk_buff *skb_frag =
+						skb_shinfo(skb)->frag_list;
+
+				skb_shinfo(skb)->frag_list = NULL;
+				while ((skbn = rmnet_map_deaggregate(skb, port))
+					!= NULL)
+					__rmnet_map_ingress_handler(skbn, port);
+				consume_skb(skb);
+				skb = skb_frag;
+			}
+		}
 	} else {
 		__rmnet_map_ingress_handler(skb, port);
 	}
@@ -151,6 +248,11 @@
 			return -ENOMEM;
 	}
 
+#ifdef CONFIG_QCOM_QMI_HELPERS
+	if (port->data_format & RMNET_INGRESS_FORMAT_PS)
+		qmi_rmnet_work_maybe_restart(port);
+#endif
+
 	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
 		rmnet_map_checksum_uplink_packet(skb, orig_dev);
 
@@ -160,8 +262,26 @@
 
 	map_header->mux_id = mux_id;
 
-	skb->protocol = htons(ETH_P_MAP);
+	if (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+		int non_linear_skb;
 
+		if (rmnet_map_tx_agg_skip(skb, required_headroom))
+			goto done;
+
+		non_linear_skb = (orig_dev->features & NETIF_F_GSO) &&
+				 skb_is_nonlinear(skb);
+
+		if (non_linear_skb) {
+			if (unlikely(__skb_linearize(skb)))
+				goto done;
+		}
+
+		rmnet_map_tx_aggregate(skb, port);
+		return -EINPROGRESS;
+	}
+
+done:
+	skb->protocol = htons(ETH_P_MAP);
 	return 0;
 }
 
@@ -192,6 +312,8 @@
 	if (skb->pkt_type == PACKET_LOOPBACK)
 		return RX_HANDLER_PASS;
 
+	trace_rmnet_low(RMNET_MODULE, RMNET_RCV_FROM_PND, 0xDEF,
+			0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 	dev = skb->dev;
 	port = rmnet_get_port(dev);
 
@@ -207,6 +329,7 @@
 done:
 	return RX_HANDLER_CONSUMED;
 }
+EXPORT_SYMBOL(rmnet_rx_handler);
 
 /* Modifies packet as per logical endpoint configuration and egress data format
  * for egress device configured in logical endpoint. Packet is then transmitted
@@ -218,7 +341,11 @@
 	struct rmnet_port *port;
 	struct rmnet_priv *priv;
 	u8 mux_id;
+	int err;
+	u32 skb_len;
 
+	trace_rmnet_low(RMNET_MODULE, RMNET_TX_UL_PKT, 0xDEF, 0xDEF, 0xDEF,
+			0xDEF, (void *)skb, NULL);
 	sk_pacing_shift_update(skb->sk, 8);
 
 	orig_dev = skb->dev;
@@ -230,10 +357,16 @@
 	if (!port)
 		goto drop;
 
-	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
+	skb_len = skb->len;
+	err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
+	if (err == -ENOMEM) {
 		goto drop;
+	} else if (err == -EINPROGRESS) {
+		rmnet_vnd_tx_fixup(orig_dev, skb_len);
+		return;
+	}
 
-	rmnet_vnd_tx_fixup(skb, orig_dev);
+	rmnet_vnd_tx_fixup(orig_dev, skb_len);
 
 	dev_queue_xmit(skb);
 	return;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
index 3537e4c..8ce6fe6c 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -1,13 +1,5 @@
-/* Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved.
  *
  * RMNET Data ingress/egress handler
  *
@@ -19,7 +11,10 @@
 #include "rmnet_config.h"
 
 void rmnet_egress_handler(struct sk_buff *skb);
-
+void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
+void rmnet_set_skb_proto(struct sk_buff *skb);
+rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
+					       struct rmnet_port *port);
 rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
 
 #endif /* _RMNET_HANDLERS_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 884f1f5..645c3d5 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -1,17 +1,9 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */
 
 #ifndef _RMNET_MAP_H_
 #define _RMNET_MAP_H_
+#include "rmnet_config.h"
 
 struct rmnet_map_control_command {
 	u8  command_name;
@@ -34,6 +26,8 @@
 	RMNET_MAP_COMMAND_NONE,
 	RMNET_MAP_COMMAND_FLOW_DISABLE,
 	RMNET_MAP_COMMAND_FLOW_ENABLE,
+	RMNET_MAP_COMMAND_FLOW_START = 7,
+	RMNET_MAP_COMMAND_FLOW_END = 8,
 	/* These should always be the last 2 elements */
 	RMNET_MAP_COMMAND_UNKNOWN,
 	RMNET_MAP_COMMAND_ENUM_LENGTH
@@ -63,6 +57,60 @@
 	u16 csum_enabled:1;
 } __aligned(1);
 
+struct rmnet_map_control_command_header {
+	u8  command_name;
+	u8  cmd_type:2;
+	u8  reserved:6;
+	u16 reserved2;
+	u32 transaction_id;
+}  __aligned(1);
+
+struct rmnet_map_flow_info_le {
+	__be32 mux_id;
+	__be32 flow_id;
+	__be32 bytes;
+	__be32 pkts;
+} __aligned(1);
+
+struct rmnet_map_flow_info_be {
+	u32 mux_id;
+	u32 flow_id;
+	u32 bytes;
+	u32 pkts;
+} __aligned(1);
+
+struct rmnet_map_dl_ind_hdr {
+	union {
+		struct {
+			u32 seq;
+			u32 bytes;
+			u32 pkts;
+			u32 flows;
+			struct rmnet_map_flow_info_le flow[0];
+		} le __aligned(1);
+		struct {
+			__be32 seq;
+			__be32 bytes;
+			__be32 pkts;
+			__be32 flows;
+			struct rmnet_map_flow_info_be flow[0];
+		} be __aligned(1);
+	} __aligned(1);
+} __aligned(1);
+
+struct rmnet_map_dl_ind_trl {
+	union {
+		__be32 seq_be;
+		u32 seq_le;
+	} __aligned(1);
+} __aligned(1);
+
+struct rmnet_map_dl_ind {
+	void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
+	void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
+	struct list_head list;
+};
+
 #define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header *) \
 				 (Y)->data)->mux_id)
 #define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header *) \
@@ -75,6 +123,9 @@
 #define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
 					(Y)->data)->pkt_len))
 
+#define RMNET_MAP_DEAGGR_SPACING  64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+
 #define RMNET_MAP_COMMAND_REQUEST     0
 #define RMNET_MAP_COMMAND_ACK         1
 #define RMNET_MAP_COMMAND_UNSUPPORTED 2
@@ -91,5 +142,17 @@
 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
 				      struct net_device *orig_dev);
-
+int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
+void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+int rmnet_map_flow_command(struct sk_buff *skb,
+			   struct rmnet_port *port,
+			   bool rmnet_perf);
+void rmnet_map_cmd_init(struct rmnet_port *port);
+int rmnet_map_dl_ind_register(struct rmnet_port *port,
+			      struct rmnet_map_dl_ind *dl_ind);
+int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
+				struct rmnet_map_dl_ind *dl_ind);
+void rmnet_map_cmd_exit(struct rmnet_port *port);
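+
+/*
+ * Illustrative sketch (not part of the original patch): a client interested
+ * in DL marker indications fills in a rmnet_map_dl_ind and registers it.
+ * The callback names are hypothetical.
+ *
+ *	static struct rmnet_map_dl_ind my_dl_ind = {
+ *		.dl_hdr_handler = my_dl_hdr_cb,
+ *		.dl_trl_handler = my_dl_trl_cb,
+ *	};
+ *
+ *	rmnet_map_dl_ind_register(port, &my_dl_ind);
+ */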
 #endif /* _RMNET_MAP_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 3ee8ae9..79acd12 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -1,14 +1,5 @@
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. */
 
 #include <linux/netdevice.h>
 #include "rmnet_config.h"
@@ -16,6 +7,17 @@
 #include "rmnet_private.h"
 #include "rmnet_vnd.h"
 
+#define RMNET_DL_IND_HDR_SIZE (sizeof(struct rmnet_map_dl_ind_hdr) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
+#define RMNET_MAP_CMD_SIZE (sizeof(struct rmnet_map_header) + \
+			    sizeof(struct rmnet_map_control_command_header))
+
+#define RMNET_DL_IND_TRL_SIZE (sizeof(struct rmnet_map_dl_ind_trl) + \
+			       sizeof(struct rmnet_map_header) + \
+			       sizeof(struct rmnet_map_control_command_header))
+
 static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
 				    struct rmnet_port *port,
 				    int enable)
@@ -83,6 +85,102 @@
 	netif_tx_unlock(dev);
 }
 
+static void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
+				    struct rmnet_map_dl_ind_hdr *dlhdr)
+{
+	struct rmnet_map_dl_ind *tmp;
+
+	port->dl_marker_flush = 0;
+
+	list_for_each_entry(tmp, &port->dl_list, list)
+		tmp->dl_hdr_handler(dlhdr);
+}
+
+static void rmnet_map_dl_trl_notify(struct rmnet_port *port,
+				    struct rmnet_map_dl_ind_trl *dltrl)
+{
+	struct rmnet_map_dl_ind *tmp;
+	struct napi_struct *napi;
+
+	list_for_each_entry(tmp, &port->dl_list, list)
+		tmp->dl_trl_handler(dltrl);
+
+	if (port->dl_marker_flush) {
+		napi = get_current_napi_context();
+		napi_gro_flush(napi, false);
+	}
+
+	port->dl_marker_flush = -1;
+}
+
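+/* Handles a DL flow start command: records the header fields in the
+ * per-port stats (guarding the running averages against dl_hdr_count
+ * wrapping to 0) and notifies registered listeners. When rmnet_perf owns
+ * the skb, the indication header (and checksum trailer, if present) is
+ * pulled off as well.
+ */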
+static void rmnet_map_process_flow_start(struct sk_buff *skb,
+					 struct rmnet_port *port,
+					 bool rmnet_perf)
+{
+	struct rmnet_map_dl_ind_hdr *dlhdr;
+
+	if (skb->len < RMNET_DL_IND_HDR_SIZE)
+		return;
+
+	skb_pull(skb, RMNET_MAP_CMD_SIZE);
+
+	dlhdr = (struct rmnet_map_dl_ind_hdr *)skb->data;
+
+	port->stats.dl_hdr_last_seq = dlhdr->le.seq;
+	port->stats.dl_hdr_last_bytes = dlhdr->le.bytes;
+	port->stats.dl_hdr_last_pkts = dlhdr->le.pkts;
+	port->stats.dl_hdr_last_flows = dlhdr->le.flows;
+	port->stats.dl_hdr_total_bytes += port->stats.dl_hdr_last_bytes;
+	port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
+	port->stats.dl_hdr_count++;
+
+	if (unlikely(!(port->stats.dl_hdr_count)))
+		port->stats.dl_hdr_count = 1;
+
+	port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes /
+				       port->stats.dl_hdr_count;
+
+	port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts /
+				      port->stats.dl_hdr_count;
+
+	rmnet_map_dl_hdr_notify(port, dlhdr);
+	if (rmnet_perf) {
+		unsigned int pull_size;
+
+		pull_size = sizeof(struct rmnet_map_dl_ind_hdr);
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
+		skb_pull(skb, pull_size);
+	}
+}
+
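+/* Handles a DL flow end command: records the trailer sequence number,
+ * notifies registered listeners and, with rmnet_perf, also pulls the
+ * trailer (and checksum trailer, if present) off the skb.
+ */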
+static void rmnet_map_process_flow_end(struct sk_buff *skb,
+				       struct rmnet_port *port,
+				       bool rmnet_perf)
+{
+	struct rmnet_map_dl_ind_trl *dltrl;
+
+	if (skb->len < RMNET_DL_IND_TRL_SIZE)
+		return;
+
+	skb_pull(skb, RMNET_MAP_CMD_SIZE);
+
+	dltrl = (struct rmnet_map_dl_ind_trl *)skb->data;
+
+	port->stats.dl_trl_last_seq = dltrl->seq_le;
+	port->stats.dl_trl_count++;
+
+	rmnet_map_dl_trl_notify(port, dltrl);
+	if (rmnet_perf) {
+		unsigned int pull_size;
+
+		pull_size = sizeof(struct rmnet_map_dl_ind_trl);
+		if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+			pull_size += sizeof(struct rmnet_map_dl_csum_trailer);
+		skb_pull(skb, pull_size);
+	}
+}
+
 /* Process MAP command frame and send N/ACK message as appropriate. Message cmd
  * name is decoded here and appropriate handler is called.
  */
@@ -112,3 +210,81 @@
 	if (rc == RMNET_MAP_COMMAND_ACK)
 		rmnet_map_send_ack(skb, rc, port);
 }
+
+int rmnet_map_flow_command(struct sk_buff *skb, struct rmnet_port *port,
+			   bool rmnet_perf)
+{
+	struct rmnet_map_control_command *cmd;
+	unsigned char command_name;
+
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+	command_name = cmd->command_name;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_START:
+		rmnet_map_process_flow_start(skb, port, rmnet_perf);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_END:
+		rmnet_map_process_flow_end(skb, port, rmnet_perf);
+		break;
+
+	default:
+		return 1;
+	}
+
+	/* When rmnet_perf is loaded, it is responsible for consuming the skb */
+	if (!rmnet_perf)
+		consume_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_flow_command);
+
+void rmnet_map_cmd_exit(struct rmnet_port *port)
+{
+	struct rmnet_map_dl_ind *tmp, *idx;
+
+	list_for_each_entry_safe(tmp, idx, &port->dl_list, list)
+		list_del_rcu(&tmp->list);
+}
+
+void rmnet_map_cmd_init(struct rmnet_port *port)
+{
+	INIT_LIST_HEAD(&port->dl_list);
+
+	port->dl_marker_flush = -1;
+}
+
+int rmnet_map_dl_ind_register(struct rmnet_port *port,
+			      struct rmnet_map_dl_ind *dl_ind)
+{
+	if (!port || !dl_ind || !dl_ind->dl_hdr_handler ||
+	    !dl_ind->dl_trl_handler)
+		return -EINVAL;
+
+	list_add_rcu(&dl_ind->list, &port->dl_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_dl_ind_register);
+
+int rmnet_map_dl_ind_deregister(struct rmnet_port *port,
+				struct rmnet_map_dl_ind *dl_ind)
+{
+	struct rmnet_map_dl_ind *tmp;
+
+	if (!port || !dl_ind)
+		return -EINVAL;
+
+	list_for_each_entry(tmp, &port->dl_list, list) {
+		if (tmp == dl_ind) {
+			list_del_rcu(&dl_ind->list);
+			break;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(rmnet_map_dl_ind_deregister);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 57a9c31..b6c5b1f 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * RMNET Data MAP protocol
  *
  */
@@ -20,9 +12,9 @@
 #include "rmnet_config.h"
 #include "rmnet_map.h"
 #include "rmnet_private.h"
+#include "rmnet_handlers.h"
 
-#define RMNET_MAP_DEAGGR_SPACING  64
-#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+#define RMNET_MAP_PKT_COPY_THRESHOLD 64
 
 static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
 					 const void *txporthdr)
@@ -307,11 +299,34 @@
 }
 
 /* Deaggregates a single packet
- * A whole new buffer is allocated for each portion of an aggregated frame.
+ * A whole new buffer is allocated for each portion of an aggregated frame
+ * except when a UDP or command packet is received.
  * Caller should keep calling deaggregate() on the source skb until 0 is
  * returned, indicating that there are no more packets to deaggregate. Caller
  * is responsible for freeing the original skb.
  */
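+/* Decide whether a deaggregated packet must be copied into a fresh buffer.
+ * Command frames and UDP payloads can be cloned; everything else, including
+ * packets shorter than RMNET_MAP_PKT_COPY_THRESHOLD, gets a copy.
+ * skb->data[4] is the IP version nibble just past the 4-byte MAP header.
+ * Returns 1 when a copy is required, 0 when a clone is safe.
+ */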
+static int rmnet_validate_clone(struct sk_buff *skb)
+{
+	if (RMNET_MAP_GET_CD_BIT(skb))
+		return 0;
+
+	if (skb->len < RMNET_MAP_PKT_COPY_THRESHOLD)
+		return 1;
+
+	switch (skb->data[4] & 0xF0) {
+	case 0x40:
+		if (((struct iphdr *)&skb->data[4])->protocol == IPPROTO_UDP)
+			return 0;
+		break;
+	case 0x60:
+		if (((struct ipv6hdr *)&skb->data[4])->nexthdr == IPPROTO_UDP)
+			return 0;
+		break;
+	}
+
+	return 1;
+}
+
 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
 				      struct rmnet_port *port)
 {
@@ -335,13 +350,27 @@
 	if (ntohs(maph->pkt_len) == 0)
 		return NULL;
 
-	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
-	if (!skbn)
-		return NULL;
 
-	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
-	skb_put(skbn, packet_len);
-	memcpy(skbn->data, skb->data, packet_len);
+	if (rmnet_validate_clone(skb)) {
+		skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
+				 GFP_ATOMIC);
+		if (!skbn)
+			return NULL;
+
+		skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+		skb_put(skbn, packet_len);
+		memcpy(skbn->data, skb->data, packet_len);
+	} else {
+		skbn = skb_clone(skb, GFP_ATOMIC);
+		if (!skbn)
+			return NULL;
+
+		skb_trim(skbn, packet_len);
+		skbn->truesize = SKB_TRUESIZE(packet_len);
+		__skb_set_hash(skbn, 0, 0, 0);
+	}
+
 	skb_pull(skb, packet_len);
 
 	return skbn;
@@ -386,6 +415,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(rmnet_map_checksum_downlink_packet);
 
 /* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
  * packets that are supported for UL checksum offload.
@@ -432,3 +462,222 @@
 
 	priv->stats.csum_sw++;
 }
+
+struct rmnet_agg_work {
+	struct work_struct work;
+	struct rmnet_port *port;
+};
+
+long rmnet_agg_time_limit __read_mostly = 1000000L;
+long rmnet_agg_bypass_time __read_mostly = 10000000L;
+
+int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset)
+{
+	u8 *packet_start = skb->data + offset;
+	int is_icmp = 0;
+
+	if (skb->protocol == htons(ETH_P_IP)) {
+		struct iphdr *ip4h = (struct iphdr *)(packet_start);
+
+		if (ip4h->protocol == IPPROTO_ICMP)
+			is_icmp = 1;
+	} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
+
+		if (ip6h->nexthdr == IPPROTO_ICMPV6) {
+			is_icmp = 1;
+		} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
+			struct frag_hdr *frag;
+
+			frag = (struct frag_hdr *)(packet_start
+						   + sizeof(struct ipv6hdr));
+			if (frag->nexthdr == IPPROTO_ICMPV6)
+				is_icmp = 1;
+		}
+	}
+
+	return is_icmp;
+}
+
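+/* Workqueue handler: detach any pending uplink aggregate under the lock
+ * and transmit it outside of it.
+ */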
+static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
+{
+	struct rmnet_agg_work *real_work;
+	struct rmnet_port *port;
+	unsigned long flags;
+	struct sk_buff *skb;
+	int agg_count = 0;
+
+	real_work = container_of(work, struct rmnet_agg_work, work);
+	port = real_work->port;
+	skb = NULL;
+
+	spin_lock_irqsave(&port->agg_lock, flags);
+	if (likely(port->agg_state == -EINPROGRESS)) {
+		/* Buffer may have already been shipped out */
+		if (likely(port->agg_skb)) {
+			skb = port->agg_skb;
+			agg_count = port->agg_count;
+			port->agg_skb = NULL;
+			port->agg_count = 0;
+			memset(&port->agg_time, 0, sizeof(struct timespec));
+		}
+		port->agg_state = 0;
+	}
+
+	spin_unlock_irqrestore(&port->agg_lock, flags);
+	if (skb)
+		dev_queue_xmit(skb);
+
+	kfree(real_work);
+}
+
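+/* hrtimer callbacks run in atomic context, so the actual flush and
+ * transmit are deferred to a workqueue. On allocation failure only the
+ * agg_state flag is cleared so a later packet can re-arm the timer.
+ */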
+enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
+{
+	struct rmnet_agg_work *work;
+	struct rmnet_port *port;
+
+	port = container_of(t, struct rmnet_port, hrtimer);
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		port->agg_state = 0;
+
+		return HRTIMER_NORESTART;
+	}
+
+	INIT_WORK(&work->work, rmnet_map_flush_tx_packet_work);
+	work->port = port;
+	schedule_work((struct work_struct *)work);
+	return HRTIMER_NORESTART;
+}
+
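+/* Uplink aggregation policy: bypass aggregation entirely when traffic is
+ * sparse (inter-packet gap above rmnet_agg_bypass_time), ship the pending
+ * aggregate once the size, count or rmnet_agg_time_limit thresholds are
+ * hit, otherwise append the packet and (re)arm a 3 ms flush timer.
+ */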
+void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
+{
+	struct timespec diff, last;
+	int size, agg_count = 0;
+	struct sk_buff *agg_skb;
+	unsigned long flags;
+	u8 *dest_buff;
+
+new_packet:
+	spin_lock_irqsave(&port->agg_lock, flags);
+	memcpy(&last, &port->agg_last, sizeof(struct timespec));
+	getnstimeofday(&port->agg_last);
+
+	if (!port->agg_skb) {
+		/* Check to see if we should agg first. If the traffic is very
+		 * sparse, don't aggregate. We will need to tune this later
+		 */
+		diff = timespec_sub(port->agg_last, last);
+
+		if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time) {
+			spin_unlock_irqrestore(&port->agg_lock, flags);
+			skb->protocol = htons(ETH_P_MAP);
+			dev_queue_xmit(skb);
+			return;
+		}
+
+		size = port->egress_agg_size - skb->len;
+		port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+		if (!port->agg_skb) {
+			port->agg_skb = NULL;
+			port->agg_count = 0;
+			memset(&port->agg_time, 0, sizeof(struct timespec));
+			spin_unlock_irqrestore(&port->agg_lock, flags);
+			skb->protocol = htons(ETH_P_MAP);
+			dev_queue_xmit(skb);
+			return;
+		}
+		port->agg_skb->protocol = htons(ETH_P_MAP);
+		port->agg_count = 1;
+		getnstimeofday(&port->agg_time);
+		dev_kfree_skb_any(skb);
+		goto schedule;
+	}
+	diff = timespec_sub(port->agg_last, port->agg_time);
+
+	if (skb->len > (port->egress_agg_size - port->agg_skb->len) ||
+	    port->agg_count >= port->egress_agg_count ||
+	    diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_time_limit) {
+		agg_skb = port->agg_skb;
+		agg_count = port->agg_count;
+		port->agg_skb = NULL;
+		port->agg_count = 0;
+		memset(&port->agg_time, 0, sizeof(struct timespec));
+		port->agg_state = 0;
+		spin_unlock_irqrestore(&port->agg_lock, flags);
+		hrtimer_cancel(&port->hrtimer);
+		dev_queue_xmit(agg_skb);
+		goto new_packet;
+	}
+
+	dest_buff = skb_put(port->agg_skb, skb->len);
+	memcpy(dest_buff, skb->data, skb->len);
+	port->agg_count++;
+	dev_kfree_skb_any(skb);
+
+schedule:
+	if (port->agg_state != -EINPROGRESS) {
+		port->agg_state = -EINPROGRESS;
+		hrtimer_start(&port->hrtimer, ns_to_ktime(3000000),
+			      HRTIMER_MODE_REL);
+	}
+	spin_unlock_irqrestore(&port->agg_lock, flags);
+}
+
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
+{
+	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+	port->egress_agg_size = 8192;
+	port->egress_agg_count = 20;
+	spin_lock_init(&port->agg_lock);
+}
+
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
+{
+	unsigned long flags;
+
+	hrtimer_cancel(&port->hrtimer);
+	spin_lock_irqsave(&port->agg_lock, flags);
+	if (port->agg_state == -EINPROGRESS) {
+		if (port->agg_skb) {
+			kfree_skb(port->agg_skb);
+			port->agg_skb = NULL;
+			port->agg_count = 0;
+			memset(&port->agg_time, 0, sizeof(struct timespec));
+		}
+
+		port->agg_state = 0;
+	}
+
+	spin_unlock_irqrestore(&port->agg_lock, flags);
+}
+
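+/* Transmit a QMAP command skb. Any pending uplink aggregate is flushed
+ * first so the command is not reordered ahead of already-queued data.
+ */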
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb)
+{
+	struct rmnet_port *port;
+	struct sk_buff *agg_skb;
+	unsigned long flags;
+
+	port = rmnet_get_port(qmap_skb->dev);
+
+	if (port && (port->data_format & RMNET_EGRESS_FORMAT_AGGREGATION)) {
+		spin_lock_irqsave(&port->agg_lock, flags);
+		if (port->agg_skb) {
+			agg_skb = port->agg_skb;
+			port->agg_skb = NULL;
+			port->agg_count = 0;
+			memset(&port->agg_time, 0, sizeof(struct timespec));
+			port->agg_state = 0;
+			spin_unlock_irqrestore(&port->agg_lock, flags);
+			hrtimer_cancel(&port->hrtimer);
+			dev_queue_xmit(agg_skb);
+		} else {
+			spin_unlock_irqrestore(&port->agg_lock, flags);
+		}
+	}
+
+	dev_queue_xmit(qmap_skb);
+}
+EXPORT_SYMBOL(rmnet_map_tx_qmap_cmd);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index b9cc4f8..c30815b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -1,13 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 #ifndef _RMNET_PRIVATE_H_
@@ -18,6 +10,15 @@
 #define RMNET_NEEDED_HEADROOM      16
 #define RMNET_TX_QUEUE_LEN         1000
 
+/* Constants */
+#define RMNET_EGRESS_FORMAT_AGGREGATION         BIT(31)
+#define RMNET_INGRESS_FORMAT_DL_MARKER          BIT(30)
+#define RMNET_INGRESS_FORMAT_RPS_STAMP          BIT(29)
+
+/* Power save feature*/
+#define RMNET_INGRESS_FORMAT_PS                 BIT(27)
+#define RMNET_FORMAT_PS_NOTIF                   BIT(26)
+
 /* Replace skb->dev to a virtual rmnet device and pass up the stack */
 #define RMNET_EPMODE_VND (1)
 /* Pass the frame directly to another device with dev_queue_xmit() */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 0afc3d3..f6aedf0 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -1,21 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
  * RMNET Data virtual network driver
  *
  */
 
 #include <linux/etherdevice.h>
 #include <linux/if_arp.h>
+#include <linux/ip.h>
 #include <net/pkt_sched.h>
 #include "rmnet_config.h"
 #include "rmnet_handlers.h"
@@ -23,9 +15,12 @@
 #include "rmnet_map.h"
 #include "rmnet_vnd.h"
 
+#include <soc/qcom/qmi_rmnet.h>
+#include <trace/events/rmnet.h>
+
 /* RX/TX Fixup */
 
-void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
+void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len)
 {
 	struct rmnet_priv *priv = netdev_priv(dev);
 	struct rmnet_pcpu_stats *pcpu_ptr;
@@ -34,11 +29,11 @@
 
 	u64_stats_update_begin(&pcpu_ptr->syncp);
 	pcpu_ptr->stats.rx_pkts++;
-	pcpu_ptr->stats.rx_bytes += skb->len;
+	pcpu_ptr->stats.rx_bytes += skb_len;
 	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
-void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len)
 {
 	struct rmnet_priv *priv = netdev_priv(dev);
 	struct rmnet_pcpu_stats *pcpu_ptr;
@@ -47,7 +42,7 @@
 
 	u64_stats_update_begin(&pcpu_ptr->syncp);
 	pcpu_ptr->stats.tx_pkts++;
-	pcpu_ptr->stats.tx_bytes += skb->len;
+	pcpu_ptr->stats.tx_bytes += skb_len;
 	u64_stats_update_end(&pcpu_ptr->syncp);
 }
 
@@ -57,10 +52,19 @@
 					struct net_device *dev)
 {
 	struct rmnet_priv *priv;
+	int ip_type;
+	u32 mark;
+	unsigned int len;
 
 	priv = netdev_priv(dev);
 	if (priv->real_dev) {
+		ip_type = (ip_hdr(skb)->version == 4) ? AF_INET : AF_INET6;
+		mark = skb->mark;
+		len = skb->len;
+		trace_rmnet_xmit_skb(skb);
 		rmnet_egress_handler(skb);
+		qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
 	} else {
 		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
 		kfree_skb(skb);
@@ -105,9 +109,15 @@
 static void rmnet_vnd_uninit(struct net_device *dev)
 {
 	struct rmnet_priv *priv = netdev_priv(dev);
+	void *qos;
 
 	gro_cells_destroy(&priv->gro_cells);
 	free_percpu(priv->pcpu_stats);
+
+	qos = priv->qos_info;
+	RCU_INIT_POINTER(priv->qos_info, NULL);
+	synchronize_rcu();
+	qmi_rmnet_qos_exit(dev, qos);
 }
 
 static void rmnet_get_stats64(struct net_device *dev,
@@ -141,6 +151,20 @@
 	s->tx_dropped = total_stats.tx_drops;
 }
 
+static u16 rmnet_vnd_select_queue(struct net_device *dev,
+				  struct sk_buff *skb,
+				  struct net_device *sb_dev,
+				  select_queue_fallback_t fallback)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	int txq = 0;
+
+	if (priv->real_dev)
+		txq = qmi_rmnet_get_queue(dev, skb);
+
+	return (txq < dev->real_num_tx_queues) ? txq : 0;
+}
+
 static const struct net_device_ops rmnet_vnd_ops = {
 	.ndo_start_xmit = rmnet_vnd_start_xmit,
 	.ndo_change_mtu = rmnet_vnd_change_mtu,
@@ -150,6 +174,7 @@
 	.ndo_init       = rmnet_vnd_init,
 	.ndo_uninit     = rmnet_vnd_uninit,
 	.ndo_get_stats64 = rmnet_get_stats64,
+	.ndo_select_queue = rmnet_vnd_select_queue,
 };
 
 static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
@@ -164,12 +189,29 @@
 	"Checksum computed in software",
 };
 
+static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"DL header last seen sequence",
+	"DL header last seen bytes",
+	"DL header last seen packets",
+	"DL header last seen flows",
+	"DL header pkts received",
+	"DL header total bytes received",
+	"DL header total pkts received",
+	"DL header average bytes",
+	"DL header average packets",
+	"DL trailer last seen sequence",
+	"DL trailer pkts received",
+};
+
 static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 {
 	switch (stringset) {
 	case ETH_SS_STATS:
 		memcpy(buf, &rmnet_gstrings_stats,
 		       sizeof(rmnet_gstrings_stats));
+		memcpy(buf + sizeof(rmnet_gstrings_stats),
+		       &rmnet_port_gstrings_stats,
+		       sizeof(rmnet_port_gstrings_stats));
 		break;
 	}
 }
@@ -178,7 +220,8 @@
 {
 	switch (sset) {
 	case ETH_SS_STATS:
-		return ARRAY_SIZE(rmnet_gstrings_stats);
+		return ARRAY_SIZE(rmnet_gstrings_stats) +
+		       ARRAY_SIZE(rmnet_port_gstrings_stats);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -189,17 +232,42 @@
 {
 	struct rmnet_priv *priv = netdev_priv(dev);
 	struct rmnet_priv_stats *st = &priv->stats;
+	struct rmnet_port_priv_stats *stp;
+	struct rmnet_port *port;
 
-	if (!data)
+	port = rmnet_get_port(priv->real_dev);
+
+	if (!data || !port)
 		return;
 
+	stp = &port->stats;
+
 	memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
+	memcpy(data + ARRAY_SIZE(rmnet_gstrings_stats), stp,
+	       ARRAY_SIZE(rmnet_port_gstrings_stats) * sizeof(u64));
+}
+
+static int rmnet_stats_reset(struct net_device *dev)
+{
+	struct rmnet_priv *priv = netdev_priv(dev);
+	struct rmnet_port_priv_stats *stp;
+	struct rmnet_port *port;
+
+	port = rmnet_get_port(priv->real_dev);
+	if (!port)
+		return -EINVAL;
+
+	stp = &port->stats;
+
+	memset(stp, 0, sizeof(*stp));
+	return 0;
 }
 
 static const struct ethtool_ops rmnet_ethtool_ops = {
 	.get_ethtool_stats = rmnet_get_ethtool_stats,
 	.get_strings = rmnet_get_strings,
 	.get_sset_count = rmnet_get_sset_count,
+	/* nway_reset is repurposed here to clear the port statistics */
+	.nway_reset = rmnet_stats_reset,
 };
 
 /* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
@@ -210,7 +278,7 @@
 	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
 	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
 	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
-	eth_random_addr(rmnet_dev->dev_addr);
+	random_ether_addr(rmnet_dev->dev_addr);
 	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
 
 	/* Raw IP mode */
@@ -258,6 +326,7 @@
 		priv = netdev_priv(rmnet_dev);
 		priv->mux_id = id;
 		priv->real_dev = real_dev;
+		priv->qos_info = qmi_rmnet_qos_init(real_dev, id);
 
 		netdev_dbg(rmnet_dev, "rmnet dev created\n");
 	}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 71e4c32..719e8d70 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -1,13 +1,5 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
  *
  * RMNET Data Virtual Network Device APIs
  *
@@ -23,8 +15,8 @@
 		      struct rmnet_endpoint *ep);
 int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
 		      struct rmnet_endpoint *ep);
-void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
-void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+void rmnet_vnd_rx_fixup(struct net_device *dev, u32 skb_len);
+void rmnet_vnd_tx_fixup(struct net_device *dev, u32 skb_len);
 u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
 void rmnet_vnd_setup(struct net_device *dev);
 #endif /* _RMNET_VND_H_ */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 467b51b..e1bb2d9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1838,6 +1838,10 @@
 		return -EINVAL;
 	}
 
+	/* Honor a checksum-verified hint carried in the packet info header
+	 * (the flag value mirrors CHECKSUM_UNNECESSARY).
+	 */
+	if (!(tun->flags & IFF_NO_PI) &&
+	    (pi.flags & htons(CHECKSUM_UNNECESSARY)))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
 	switch (tun->flags & TUN_TYPE_MASK) {
 	case IFF_TUN:
 		if (tun->flags & IFF_NO_PI) {
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index d4c2e42..c0db71f 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -10,3 +10,5 @@
 source "drivers/platform/chrome/Kconfig"
 
 source "drivers/platform/mellanox/Kconfig"
+
+source "drivers/platform/msm/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 4b2ce58..0fe9f8d 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -9,3 +9,4 @@
 obj-$(CONFIG_OLPC)		+= olpc/
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_CHROME_PLATFORMS)	+= chrome/
+obj-$(CONFIG_ARCH_QCOM)		+= msm/
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 1581f6a..9e529d0 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -294,6 +294,7 @@
 		pwm->pwm = chip->base + i;
 		pwm->hwpwm = i;
 		pwm->state.polarity = polarity;
+		pwm->state.output_type = PWM_OUTPUT_FIXED;
 
 		if (chip->ops->get_state)
 			chip->ops->get_state(chip, pwm, &pwm->state);
@@ -507,11 +508,46 @@
 			pwm->state.polarity = state->polarity;
 		}
 
+		if (state->output_type != pwm->state.output_type) {
+			if (!pwm->chip->ops->set_output_type)
+				return -ENOTSUPP;
+
+			err = pwm->chip->ops->set_output_type(pwm->chip, pwm,
+						state->output_type);
+			if (err)
+				return err;
+
+			pwm->state.output_type = state->output_type;
+		}
+
+		if (state->output_pattern &&
+		    state->output_pattern != pwm->state.output_pattern) {
+			if (!pwm->chip->ops->set_output_pattern)
+				return -ENOTSUPP;
+
+			err = pwm->chip->ops->set_output_pattern(pwm->chip,
+					pwm, state->output_pattern);
+			if (err)
+				return err;
+
+			pwm->state.output_pattern = state->output_pattern;
+		}
+
 		if (state->period != pwm->state.period ||
 		    state->duty_cycle != pwm->state.duty_cycle) {
-			err = pwm->chip->ops->config(pwm->chip, pwm,
-						     state->duty_cycle,
-						     state->period);
+			if (pwm->chip->ops->config_extend) {
+				err = pwm->chip->ops->config_extend(pwm->chip,
+						pwm, state->duty_cycle,
+						state->period);
+			} else {
+				if (state->period > UINT_MAX)
+					pr_warn("period %llu duty_cycle %llu will be truncated\n",
+							state->period,
+							state->duty_cycle);
+				err = pwm->chip->ops->config(pwm->chip, pwm,
+						state->duty_cycle,
+						state->period);
+			}
 			if (err)
 				return err;
 
@@ -995,8 +1031,8 @@
 		if (state.enabled)
 			seq_puts(s, " enabled");
 
-		seq_printf(s, " period: %u ns", state.period);
-		seq_printf(s, " duty: %u ns", state.duty_cycle);
+		seq_printf(s, " period: %llu ns", state.period);
+		seq_printf(s, " duty: %llu ns", state.duty_cycle);
 		seq_printf(s, " polarity: %s",
 			   state.polarity ? "inverse" : "normal");
 
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 7c71cdb..891fc1d 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -50,7 +50,7 @@
 
 	pwm_get_state(pwm, &state);
 
-	return sprintf(buf, "%u\n", state.period);
+	return sprintf(buf, "%llu\n", state.period);
 }
 
 static ssize_t period_store(struct device *child,
@@ -85,7 +85,7 @@
 
 	pwm_get_state(pwm, &state);
 
-	return sprintf(buf, "%u\n", state.duty_cycle);
+	return sprintf(buf, "%llu\n", state.duty_cycle);
 }
 
 static ssize_t duty_cycle_store(struct device *child,
@@ -220,7 +220,55 @@
 	if (ret)
 		return ret;
 
-	return sprintf(buf, "%u %u\n", result.period, result.duty_cycle);
+	return sprintf(buf, "%llu %llu\n", result.period, result.duty_cycle);
+}
+
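+/* sysfs interface for the PWM output type: reads back "fixed" or
+ * "modulated" and accepts the same strings on write, e.g. (path assumed):
+ *	echo modulated > /sys/class/pwm/pwmchip0/pwm0/output_type
+ */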
+static ssize_t output_type_show(struct device *child,
+			     struct device_attribute *attr,
+			     char *buf)
+{
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+	const char *output_type = "unknown";
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+	switch (state.output_type) {
+	case PWM_OUTPUT_FIXED:
+		output_type = "fixed";
+		break;
+	case PWM_OUTPUT_MODULATED:
+		output_type = "modulated";
+		break;
+	default:
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", output_type);
+}
+
+static ssize_t output_type_store(struct device *child,
+			      struct device_attribute *attr,
+			      const char *buf, size_t size)
+{
+	struct pwm_export *export = child_to_pwm_export(child);
+	struct pwm_device *pwm = export->pwm;
+	struct pwm_state state;
+	int ret = -EINVAL;
+
+	mutex_lock(&export->lock);
+	pwm_get_state(pwm, &state);
+	if (sysfs_streq(buf, "fixed"))
+		state.output_type = PWM_OUTPUT_FIXED;
+	else if (sysfs_streq(buf, "modulated"))
+		state.output_type = PWM_OUTPUT_MODULATED;
+	else
+		goto unlock;
+
+	ret = pwm_apply_state(pwm, &state);
+unlock:
+	mutex_unlock(&export->lock);
+
+	return ret ? : size;
 }
 
 static DEVICE_ATTR_RW(period);
@@ -228,6 +276,7 @@
 static DEVICE_ATTR_RW(enable);
 static DEVICE_ATTR_RW(polarity);
 static DEVICE_ATTR_RO(capture);
+static DEVICE_ATTR_RW(output_type);
 
 static struct attribute *pwm_attrs[] = {
 	&dev_attr_period.attr,
@@ -235,6 +284,7 @@
 	&dev_attr_enable.attr,
 	&dev_attr_polarity.attr,
 	&dev_attr_capture.attr,
+	&dev_attr_output_type.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(pwm);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 14cd691..f7867e2 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -100,14 +100,44 @@
 	  low power modes.
 
 config QCOM_QMI_HELPERS
-	tristate
-	depends on ARCH_QCOM && NET
+	bool "QTI QMI Helpers"
+	depends on QRTR
 	help
 	  Helper library for handling QMI encoded messages.  QMI encoded
 	  messages are used in communication between the majority of QRTR
 	  clients and these helpers provide the common functionality needed for
 	  doing this from a kernel driver.
 
+config QCOM_QMI_RMNET
+	bool "QTI QMI Rmnet Helpers"
+	depends on QCOM_QMI_HELPERS
+	depends on RMNET
+	help
+	  Helpers for handling the interface between kernel clients and the
+	  rmnet driver. They enable the rmnet driver to create/delete DFC/WDA
+	  clients and provide the common functionality for data flow control
+	  and power save features.
+
+config QCOM_QMI_DFC
+	bool "Enable burst mode flow control"
+	depends on QCOM_QMI_RMNET
+	help
+	  Say y here to enable support for burst mode data flow control.
+	  The DFC client provides an interface to the modem DFC service and
+	  performs burst mode flow control. It enables a flow on receiving a
+	  flow status indication and disables flows once the grant size is
+	  reached. If unsure, or if burst mode flow control is not used,
+	  say 'N'.
+
+config QCOM_QMI_POWER_COLLAPSE
+	bool "Enable power save features"
+	depends on QCOM_QMI_RMNET
+	help
+	  Say y here to enable support for power save features.
+	  This provides an interface to offload uplink flow control based on
+	  the detected flow status.
+	  If unsure, or if the power save feature is not used, say 'N'.
+
 config QCOM_RMTFS_MEM
 	tristate "Qualcomm Remote Filesystem memory driver"
 	depends on ARCH_QCOM
@@ -411,6 +441,25 @@
 	  from logical nodes to hardware nodes controlled by the BCM (Bus
 	  Clock Manager)
 
+config QSEE_IPC_IRQ
+	bool "QSEE interrupt manager"
+	help
+	  The QSEE IPC IRQ controller manages the interrupts from the QTI
+	  secure execution environment. This interrupt controller will use
+	  the registers in the scsr region to mask and clear interrupts.
+	  Clients can use this driver to avoid adding common interrupt handling
+	  code.
+
+config QSEE_IPC_IRQ_BRIDGE
+	tristate "QSEE IPC Interrupt Bridge"
+	select QSEE_IPC_IRQ
+	help
+	  This module enables bridging an Inter-Processor Communication (IPC)
+	  interrupt from a remote subsystem directed towards
+	  Qualcomm Technologies, Inc. Secure Execution Environment (QSEE) to
+	  userspace. The interrupt will be propagated through a character
+	  device that userspace clients can poll on.
+
 config QCOM_GLINK
 	tristate "GLINK Probe Helper"
 	depends on RPMSG_QCOM_GLINK_SMEM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index addf21d..800b6a4 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -9,6 +9,9 @@
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
 obj-$(CONFIG_QCOM_QMI_HELPERS)	+= qmi_helpers.o
 qmi_helpers-y	+= qmi_encdec.o qmi_interface.o
+obj-$(CONFIG_QCOM_QMI_RMNET)	+= qmi_rmnet.o
+obj-$(CONFIG_QCOM_QMI_DFC)	+= dfc_qmi.o
+obj-$(CONFIG_QCOM_QMI_POWER_COLLAPSE) += wda_qmi.o
 obj-$(CONFIG_QCOM_RMTFS_MEM)	+= rmtfs_mem.o
 obj-$(CONFIG_QCOM_RPMH)		+= qcom_rpmh.o
 qcom_rpmh-y			+= rpmh-rsc.o
@@ -51,3 +54,5 @@
 obj-$(CONFIG_QCOM_FSA4480_I2C) += fsa4480-i2c.o
 obj-$(CONFIG_QCOM_GLINK) += glink_probe.o
 obj-$(CONFIG_QCOM_GLINK_PKT) += glink_pkt.o
+obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o
+obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index f064750..a6532a7 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -8,6 +8,9 @@
 #include <linux/of_reserved_mem.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
 
 #include <soc/qcom/cmd-db.h>
 
@@ -283,6 +286,107 @@
 }
 EXPORT_SYMBOL(cmd_db_is_standalone);
 
+static void *cmd_db_start(struct seq_file *m, loff_t *pos)
+{
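+	/* Map the flat seq_file position *pos onto a (slave, entry) pair by
+	 * walking the per-slave entry counts.
+	 */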
+	int slv_idx, ent_idx, cnt;
+	struct entry_header *ent;
+	int total = 0;
+
+	for (slv_idx = 0; slv_idx < MAX_SLV_ID; slv_idx++) {
+		cnt = le16_to_cpu(cmd_db_header->header[slv_idx].cnt);
+		if (!cnt)
+			continue;
+		ent_idx = *pos - total;
+		if (ent_idx < cnt)
+			break;
+		total += cnt;
+	}
+
+	if (slv_idx == MAX_SLV_ID)
+		return NULL;
+
+	ent = (void *)cmd_db_header->data +
+	      le16_to_cpu(cmd_db_header->header[slv_idx].header_offset);
+
+	return &ent[ent_idx];
+}
+
+static void cmd_db_stop(struct seq_file *m, void *v)
+{
+}
+
+static void *cmd_db_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	(*pos)++;
+	return cmd_db_start(m, pos);
+}
+
+static int cmd_db_seq_show(struct seq_file *m, void *v)
+{
+	struct entry_header *eh = v;
+	char buf[9] = {0};
+	int eh_addr, eh_len, eh_offset;
+
+	if (!eh)
+		return 0;
+
+	memcpy(buf, &eh->id, sizeof(eh->id));
+	eh_addr = le32_to_cpu(eh->addr);
+	eh_len = le16_to_cpu(eh->len);
+	eh_offset = le16_to_cpu(eh->offset);
+	seq_printf(m, "Address: 0x%05x, id: %s", eh_addr, buf);
+
+	if (eh_len) {
+		int slv_id = (eh_addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
+		u8 aux[32] = {0};
+		int len, offset;
+		int k;
+
+		len = min_t(u32, eh_len, sizeof(aux));
+		for (k = 0; k < MAX_SLV_ID; k++) {
+			struct rsc_hdr *hdr  = &cmd_db_header->header[k];
+
+			if (le16_to_cpu(hdr->slv_id) == slv_id)
+				break;
+		}
+
+		if (k == MAX_SLV_ID)
+			return -EINVAL;
+
+		offset = le16_to_cpu(cmd_db_header->header[k].data_offset);
+		memcpy(aux, cmd_db_header->data + offset + eh_offset, len);
+
+		seq_puts(m, ", aux data: ");
+
+		for (k = 0; k < len; k++)
+			seq_printf(m, "%02x ", aux[k]);
+
+	}
+	seq_puts(m, "\n");
+
+	return 0;
+}
+
+static const struct seq_operations cmd_db_seq_ops = {
+	.start = cmd_db_start,
+	.stop = cmd_db_stop,
+	.next = cmd_db_next,
+	.show = cmd_db_seq_show,
+};
+
+static int cmd_db_file_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &cmd_db_seq_ops);
+}
+
+static const struct file_operations cmd_db_fops = {
+	.owner = THIS_MODULE,
+	.open = cmd_db_file_open,
+	.read = seq_read,
+	.release = seq_release,
+	.llseek = no_llseek,
+};
+
 static int cmd_db_dev_probe(struct platform_device *pdev)
 {
 	struct reserved_mem *rmem;
@@ -309,6 +413,9 @@
 	if (cmd_db_is_standalone() == 1)
 		pr_info("Command DB is initialized in standalone mode.\n");
 
+	if (!debugfs_create_file("cmd_db", 0444, NULL, NULL, &cmd_db_fops))
+		pr_err("Couldn't create debugfs\n");
+
 	return 0;
 }
 
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
new file mode 100644
index 0000000..b6d02b8
--- /dev/null
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/rtnetlink.h>
+#include <net/pkt_sched.h>
+#include <linux/soc/qcom/qmi.h>
+#include <soc/qcom/rmnet_qmi.h>
+
+#include "qmi_rmnet_i.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/dfc.h>
+
+#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
+
+#define DFC_MAX_BEARERS_V01 16
+#define DFC_MAX_QOS_ID_V01 2
+
+#define DFC_ACK_TYPE_DISABLE 1
+#define DFC_ACK_TYPE_THRESHOLD 2
+
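+/* In-band QMAP control header and the flow control ACK command built on
+ * top of it; the bitfield layouts mirror the MAP wire format.
+ */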
+struct dfc_qmap_header {
+	u8  pad_len:6;
+	u8  reserved_bit:1;
+	u8  cd_bit:1;
+	u8  mux_id;
+	__be16   pkt_len;
+} __aligned(1);
+
+struct dfc_ack_cmd {
+	struct dfc_qmap_header header;
+	u8  command_name;
+	u8  cmd_type:2;
+	u8  reserved:6;
+	u16 reserved2;
+	u32 transaction_id;
+	u8  ver:2;
+	u8  reserved3:6;
+	u8  type:2;
+	u8  reserved4:6;
+	u16 dfc_seq;
+	u8  reserved5[3];
+	u8  bearer_id;
+} __aligned(1);
+
+struct dfc_qmi_data {
+	void *rmnet_port;
+	struct workqueue_struct *dfc_wq;
+	struct work_struct svc_arrive;
+	struct qmi_handle handle;
+	struct sockaddr_qrtr ssctl;
+	int index;
+	int restart_state;
+};
+
+static void dfc_svc_init(struct work_struct *work);
+static void dfc_do_burst_flow_control(struct work_struct *work);
+
+/* DFC QMI service definitions */
+#define DFC_SERVICE_ID_V01 0x4E
+#define DFC_SERVICE_VERS_V01 0x01
+#define DFC_TIMEOUT_MS 10000
+
+#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
+#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
+#define QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN  11
+#define QMI_DFC_BIND_CLIENT_RESP_V01_MAX_MSG_LEN 7
+
+#define QMI_DFC_INDICATION_REGISTER_REQ_V01 0x0001
+#define QMI_DFC_INDICATION_REGISTER_RESP_V01 0x0001
+#define QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN 4
+#define QMI_DFC_INDICATION_REGISTER_RESP_V01_MAX_MSG_LEN 7
+
+#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
+#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 540
+
+struct dfc_bind_client_req_msg_v01 {
+	u8 ep_id_valid;
+	struct data_ep_id_type_v01 ep_id;
+};
+
+struct dfc_bind_client_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+struct dfc_indication_register_req_msg_v01 {
+	u8 report_flow_status_valid;
+	u8 report_flow_status;
+};
+
+struct dfc_indication_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+enum dfc_ip_type_enum_v01 {
+	DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	DFC_IPV4_TYPE_V01 = 0x4,
+	DFC_IPV6_TYPE_V01 = 0x6,
+	DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct dfc_qos_id_type_v01 {
+	u32 qos_id;
+	enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_flow_status_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 num_bytes;
+	u16 seq_num;
+	u8 qos_ids_len;
+	struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
+};
+
+static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct dfc_qos_id_type_v01,
+					   qos_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum dfc_ip_type_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct dfc_qos_id_type_v01,
+					   ip_type),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   subs_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   mux_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   bearer_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   num_bytes),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u16),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   seq_num),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   qos_ids_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_QOS_ID_V01,
+		.elem_size	= sizeof(struct dfc_qos_id_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_info_type_v01,
+					   qos_ids),
+		.ei_array	= dfc_qos_id_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct dfc_ancillary_info_type_v01 {
+	u8 subs_id;
+	u8 mux_id;
+	u8 bearer_id;
+	u32 reserved;
+};
+
+static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   subs_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   mux_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   bearer_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					   dfc_ancillary_info_type_v01,
+					   reserved),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct dfc_flow_status_ind_msg_v01 {
+	u8 flow_status_valid;
+	u8 flow_status_len;
+	struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+	u8 eod_ack_reqd_valid;
+	u8 eod_ack_reqd;
+	u8 ancillary_info_valid;
+	u8 ancillary_info_len;
+	struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_svc_ind {
+	struct work_struct work;
+	struct dfc_qmi_data *data;
+	struct dfc_flow_status_ind_msg_v01 dfc_info;
+};
+
+static struct qmi_elem_info dfc_bind_client_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct dfc_bind_client_req_msg_v01,
+					   ep_id_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct data_ep_id_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct dfc_bind_client_req_msg_v01,
+					   ep_id),
+		.ei_array	= data_ep_id_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_bind_client_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct dfc_bind_client_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_indication_register_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_flow_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_indication_register_req_msg_v01,
+					   report_flow_status),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_indication_register_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					   dfc_indication_register_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info dfc_flow_status_ind_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   flow_status_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   flow_status_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_flow_status_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   flow_status),
+		.ei_array	= dfc_flow_status_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   eod_ack_reqd_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   eod_ack_reqd),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   ancillary_info_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u8),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   ancillary_info_len),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= DFC_MAX_BEARERS_V01,
+		.elem_size	= sizeof(struct
+					 dfc_ancillary_info_type_v01),
+		.array_type	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(struct
+					   dfc_flow_status_ind_msg_v01,
+					   ancillary_info),
+		.ei_array	= dfc_ancillary_info_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static int
+dfc_bind_client_req(struct qmi_handle *dfc_handle,
+		    struct sockaddr_qrtr *ssctl, struct svc_info *svc)
+{
+	struct dfc_bind_client_resp_msg_v01 *resp;
+	struct dfc_bind_client_req_msg_v01 *req;
+	struct qmi_txn txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(dfc_handle, &txn,
+			   dfc_bind_client_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->ep_id_valid = 1;
+	req->ep_id.ep_type = svc->ep_type;
+	req->ep_id.iface_id = svc->iface_id;
+	ret = qmi_send_request(dfc_handle, ssctl, &txn,
+			       QMI_DFC_BIND_CLIENT_REQ_V01,
+			       QMI_DFC_BIND_CLIENT_REQ_V01_MAX_MSG_LEN,
+			       dfc_bind_client_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static int
+dfc_indication_register_req(struct qmi_handle *dfc_handle,
+			    struct sockaddr_qrtr *ssctl, u8 reg)
+{
+	struct dfc_indication_register_resp_msg_v01 *resp;
+	struct dfc_indication_register_req_msg_v01 *req;
+	struct qmi_txn txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(dfc_handle, &txn,
+			   dfc_indication_register_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->report_flow_status_valid = 1;
+	req->report_flow_status = reg;
+	ret = qmi_send_request(dfc_handle, ssctl, &txn,
+			       QMI_DFC_INDICATION_REGISTER_REQ_V01,
+			       QMI_DFC_INDICATION_REGISTER_REQ_V01_MAX_MSG_LEN,
+			       dfc_indication_register_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static int dfc_init_service(struct dfc_qmi_data *data, struct qmi_info *qmi)
+{
+	int rc;
+
+	rc = dfc_bind_client_req(&data->handle, &data->ssctl,
+				 &qmi->fc_info[data->index].svc);
+	if (rc < 0)
+		return rc;
+
+	return dfc_indication_register_req(&data->handle, &data->ssctl, 1);
+}
+
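+/* Build a QMAP ACK command for the given bearer and hand it to rmnet for
+ * transmission on the real device.
+ */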
+static void
+dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+	struct sk_buff *skb;
+	struct dfc_ack_cmd *msg;
+	int data_size = sizeof(struct dfc_ack_cmd);
+	int header_size = sizeof(struct dfc_qmap_header);
+
+	if (!qos)
+		return;
+
+	skb = alloc_skb(data_size, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	msg = (struct dfc_ack_cmd *)skb_put(skb, data_size);
+	memset(msg, 0, data_size);
+
+	msg->header.cd_bit = 1;
+	msg->header.mux_id = mux_id;
+	msg->header.pkt_len = htons(data_size - header_size);
+
+	msg->bearer_id = bearer_id;
+	msg->command_name = 4;
+	msg->cmd_type = 0;
+	msg->dfc_seq = htons(seq);
+	msg->type = type;
+	msg->ver = 2;
+	msg->transaction_id = htonl(qos->tran_num);
+
+	skb->dev = qos->real_dev;
+	skb->protocol = htons(ETH_P_MAP);
+
+	trace_dfc_qmap_cmd(mux_id, bearer_id, seq, type, qos->tran_num);
+	qos->tran_num++;
+
+	rmnet_map_tx_qmap_cmd(skb);
+}
+
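+/* Enable or disable every flow mapped to this bearer based on the current
+ * grant size, sending a DISABLE ack when the modem requested one.
+ * Returns the number of flows adjusted.
+ */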
+static int dfc_bearer_flow_ctl(struct net_device *dev,
+			       struct rmnet_bearer_map *bearer,
+			       struct qos_info *qos)
+{
+	struct list_head *p;
+	struct rmnet_flow_map *itm;
+	int rc = 0, qlen;
+	int enable;
+
+	enable = bearer->grant_size ? 1 : 0;
+
+	list_for_each(p, &qos->flow_head) {
+		itm = list_entry(p, struct rmnet_flow_map, list);
+
+		if (itm->bearer_id == bearer->bearer_id) {
+			/* When the bearer is marked ancillary, do not
+			 * disable its ancillary (non-INET) flows.
+			 */
+			if (bearer->ancillary && enable == 0 &&
+					DFC_IS_ANCILLARY(itm->ip_type))
+				continue;
+
+			qlen = qmi_rmnet_flow_control(dev, itm->tcm_handle,
+						    enable);
+			trace_dfc_qmi_tc(dev->name, itm->bearer_id,
+					 itm->flow_id, bearer->grant_size,
+					 qlen, itm->tcm_handle, enable);
+			rc++;
+		}
+	}
+
+	if (enable == 0 && bearer->ack_req)
+		dfc_send_ack(dev, bearer->bearer_id,
+			     bearer->seq, qos->mux_id,
+			     DFC_ACK_TYPE_DISABLE);
+
+	return rc;
+}
+
+static int dfc_all_bearer_flow_ctl(struct net_device *dev,
+				struct qos_info *qos, u8 ack_req, u32 ancillary,
+				struct dfc_flow_status_info_type_v01 *fc_info)
+{
+	struct list_head *p;
+	struct rmnet_bearer_map *bearer_itm = NULL;
+	int enable;
+
+	list_for_each(p, &qos->bearer_head) {
+		bearer_itm = list_entry(p, struct rmnet_bearer_map, list);
+
+		bearer_itm->grant_size = fc_info->num_bytes;
+		bearer_itm->grant_thresh =
+			qmi_rmnet_grant_per(bearer_itm->grant_size);
+		bearer_itm->seq = fc_info->seq_num;
+		bearer_itm->ack_req = ack_req;
+		bearer_itm->ancillary = ancillary;
+	}
+
+	enable = fc_info->num_bytes > 0 ? 1 : 0;
+
+	if (enable)
+		netif_tx_wake_all_queues(dev);
+	else
+		netif_tx_stop_all_queues(dev);
+
+	trace_dfc_qmi_tc(dev->name, 0xFF, 0, fc_info->num_bytes, 0, 0, enable);
+
+	if (enable == 0 && ack_req)
+		dfc_send_ack(dev, fc_info->bearer_id,
+			     fc_info->seq_num, fc_info->mux_id,
+			     DFC_ACK_TYPE_DISABLE);
+
+	return 0;
+}
+
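+/* Apply one flow status entry to the bearer map. Queues are only touched
+ * on a 0 <-> non-zero grant transition; a grant arriving before the flow
+ * is activated is stashed as the default grant.
+ */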
+static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
+			     u8 ack_req, u32 ancillary,
+			     struct dfc_flow_status_info_type_v01 *fc_info)
+{
+	struct rmnet_bearer_map *itm = NULL;
+	int rc = 0;
+	int action = -1;
+
+	itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
+	if (itm) {
+		if (itm->grant_size == 0 && fc_info->num_bytes > 0)
+			action = 1;
+		else if (itm->grant_size > 0 && fc_info->num_bytes == 0)
+			action = 0;
+
+		itm->grant_size = fc_info->num_bytes;
+		itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
+		itm->seq = fc_info->seq_num;
+		itm->ack_req = ack_req;
+		itm->ancillary = ancillary;
+
+		if (action != -1)
+			rc = dfc_bearer_flow_ctl(dev, itm, qos);
+	} else {
+		pr_debug("grant %u before flow activate\n", fc_info->num_bytes);
+		qos->default_grant = fc_info->num_bytes;
+	}
+	return rc;
+}
+
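+/* Work handler for a flow status indication: match up any ancillary info
+ * by mux/bearer id, then apply flow control per bearer (or to all bearers
+ * for the 0xFF wildcard) under the qos lock.
+ */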
+static void dfc_do_burst_flow_control(struct work_struct *work)
+{
+	struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work;
+	struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info;
+	struct net_device *dev;
+	struct qos_info *qos;
+	struct dfc_flow_status_info_type_v01 *flow_status;
+	struct dfc_ancillary_info_type_v01 *ai;
+	u8 ack_req = ind->eod_ack_reqd_valid ? ind->eod_ack_reqd : 0;
+	u32 ancillary;
+	int i, j;
+
+	if (unlikely(svc_ind->data->restart_state)) {
+		kfree(svc_ind);
+		return;
+	}
+
+	rcu_read_lock();
+
+	for (i = 0; i < ind->flow_status_len; i++) {
+		flow_status = &ind->flow_status[i];
+
+		ancillary = 0;
+		if (ind->ancillary_info_valid) {
+			for (j = 0; j < ind->ancillary_info_len; j++) {
+				ai = &ind->ancillary_info[j];
+				if (ai->mux_id == flow_status->mux_id &&
+				    ai->bearer_id == flow_status->bearer_id) {
+					ancillary = ai->reserved;
+					break;
+				}
+			}
+		}
+
+		trace_dfc_flow_ind(svc_ind->data->index,
+				   i, flow_status->mux_id,
+				   flow_status->bearer_id,
+				   flow_status->num_bytes,
+				   flow_status->seq_num,
+				   ack_req,
+				   ancillary);
+
+		dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port,
+					  flow_status->mux_id);
+		if (!dev)
+			goto clean_out;
+
+		qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+		if (!qos)
+			continue;
+
+		spin_lock_bh(&qos->qos_lock);
+
+		if (unlikely(flow_status->bearer_id == 0xFF))
+			dfc_all_bearer_flow_ctl(
+				dev, qos, ack_req, ancillary, flow_status);
+		else
+			dfc_update_fc_map(
+				dev, qos, ack_req, ancillary, flow_status);
+
+		spin_unlock_bh(&qos->qos_lock);
+	}
+
+clean_out:
+	rcu_read_unlock();
+	kfree(svc_ind);
+}
+
+static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
+			    struct qmi_txn *txn, const void *data)
+{
+	struct dfc_qmi_data *dfc = container_of(qmi, struct dfc_qmi_data,
+						handle);
+	struct dfc_flow_status_ind_msg_v01 *ind_msg;
+	struct dfc_svc_ind *svc_ind;
+
+	if (qmi != &dfc->handle)
+		return;
+
+	ind_msg = (struct dfc_flow_status_ind_msg_v01 *)data;
+	if (ind_msg->flow_status_valid) {
+		if (ind_msg->flow_status_len > DFC_MAX_BEARERS_V01) {
+			pr_err("%s() Invalid fc info len: %d\n",
+			       __func__, ind_msg->flow_status_len);
+			return;
+		}
+
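+		/* Copy the indication and defer processing to the dfc workqueue */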
+		svc_ind = kzalloc(sizeof(struct dfc_svc_ind), GFP_ATOMIC);
+		if (!svc_ind)
+			return;
+
+		INIT_WORK((struct work_struct *)svc_ind,
+			  dfc_do_burst_flow_control);
+
+		memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
+		svc_ind->data = dfc;
+
+		queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind);
+	}
+}
+
+static void dfc_svc_init(struct work_struct *work)
+{
+	int rc = 0;
+	struct dfc_qmi_data *data = container_of(work, struct dfc_qmi_data,
+						 svc_arrive);
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
+	if (!qmi)
+		goto clean_out;
+
+	rc = dfc_init_service(data, qmi);
+	if (rc < 0)
+		goto clean_out;
+
+	qmi->fc_info[data->index].dfc_client = (void *)data;
+	trace_dfc_client_state_up(data->index,
+				  qmi->fc_info[data->index].svc.instance,
+				  qmi->fc_info[data->index].svc.ep_type,
+				  qmi->fc_info[data->index].svc.iface_id);
+	return;
+
+clean_out:
+	qmi_handle_release(&data->handle);
+	destroy_workqueue(data->dfc_wq);
+	kfree(data);
+}
+
+static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
+						 handle);
+
+	data->ssctl.sq_family = AF_QIPCRTR;
+	data->ssctl.sq_node = svc->node;
+	data->ssctl.sq_port = svc->port;
+
+	queue_work(data->dfc_wq, &data->svc_arrive);
+
+	return 0;
+}
+
+static void dfc_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct dfc_qmi_data *data = container_of(qmi, struct dfc_qmi_data,
+						 handle);
+
+	if (!data)
+		pr_debug("%s() data is null\n", __func__);
+}
+
+static struct qmi_ops server_ops = {
+	.new_server = dfc_svc_arrive,
+	.del_server = dfc_svc_exit,
+};
+
+static struct qmi_msg_handler qmi_indication_handler[] = {
+	{
+		.type = QMI_INDICATION,
+		.msg_id = QMI_DFC_FLOW_STATUS_IND_V01,
+		.ei = dfc_flow_status_ind_v01_ei,
+		.decoded_size = QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN,
+		.fn = dfc_clnt_ind_cb,
+	},
+	{},
+};
+
+int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi)
+{
+	struct dfc_qmi_data *data;
+	int rc = -ENOMEM;
+
+	data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->rmnet_port = port;
+	data->index = index;
+	data->restart_state = 0;
+
+	data->dfc_wq = create_singlethread_workqueue("dfc_wq");
+	if (!data->dfc_wq) {
+		pr_err("%s Could not create workqueue\n", __func__);
+		goto err0;
+	}
+
+	INIT_WORK(&data->svc_arrive, dfc_svc_init);
+	rc = qmi_handle_init(&data->handle,
+			     QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN,
+			     &server_ops, qmi_indication_handler);
+	if (rc < 0) {
+		pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
+		goto err1;
+	}
+
+	rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
+			    DFC_SERVICE_VERS_V01,
+			    qmi->fc_info[index].svc.instance);
+	if (rc < 0) {
+		pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
+		goto err2;
+	}
+
+	return 0;
+
+err2:
+	qmi_handle_release(&data->handle);
+err1:
+	destroy_workqueue(data->dfc_wq);
+err0:
+	kfree(data);
+	return rc;
+}
+
+void dfc_qmi_client_exit(void *dfc_data)
+{
+	struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+
+	if (!data) {
+		pr_err("%s() data is null\n", __func__);
+		return;
+	}
+
+	data->restart_state = 1;
+	trace_dfc_client_state_down(data->index, 0);
+	qmi_handle_release(&data->handle);
+
+	drain_workqueue(data->dfc_wq);
+	destroy_workqueue(data->dfc_wq);
+	kfree(data);
+}
+
+void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
+			 int ip_type, u32 mark, unsigned int len)
+{
+	struct rmnet_bearer_map *bearer;
+	struct rmnet_flow_map *itm;
+	u32 start_grant;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
+	if (unlikely(!itm))
+		goto out;
+
+	bearer = qmi_rmnet_get_bearer_map(qos, itm->bearer_id);
+	if (unlikely(!bearer))
+		goto out;
+
+	trace_dfc_flow_check(dev->name, bearer->bearer_id,
+			     len, bearer->grant_size);
+
+	if (!bearer->grant_size)
+		goto out;
+
+	start_grant = bearer->grant_size;
+	if (len >= bearer->grant_size)
+		bearer->grant_size = 0;
+	else
+		bearer->grant_size -= len;
+
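+	/* Request a fresh grant once the remaining bytes cross below the threshold */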
+	if (start_grant > bearer->grant_thresh &&
+	    bearer->grant_size <= bearer->grant_thresh) {
+		dfc_send_ack(dev, bearer->bearer_id,
+			     bearer->seq, qos->mux_id,
+			     DFC_ACK_TYPE_THRESHOLD);
+	}
+
+	if (!bearer->grant_size)
+		dfc_bearer_flow_ctl(dev, bearer, qos);
+
+out:
+	spin_unlock_bh(&qos->qos_lock);
+}
+
+void dfc_qmi_wq_flush(struct qmi_info *qmi)
+{
+	struct dfc_qmi_data *dfc_data;
+	int i;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		dfc_data = (struct dfc_qmi_data *)(qmi->fc_info[i].dfc_client);
+		if (dfc_data)
+			flush_workqueue(dfc_data->dfc_wq);
+	}
+}
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 938ca41c..4400f51 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -318,7 +318,7 @@
 	txn->dest = c_struct;
 
 	mutex_lock(&qmi->txn_lock);
-	ret = idr_alloc_cyclic(&qmi->txns, txn, 0, INT_MAX, GFP_KERNEL);
+	ret = idr_alloc_cyclic(&qmi->txns, txn, 0, U16_MAX, GFP_KERNEL);
 	if (ret < 0)
 		pr_err("failed to allocate transaction id\n");
 
@@ -345,8 +345,7 @@
 	struct qmi_handle *qmi = txn->qmi;
 	int ret;
 
-	ret = wait_for_completion_interruptible_timeout(&txn->completion,
-							timeout);
+	ret = wait_for_completion_timeout(&txn->completion, timeout);
 
 	mutex_lock(&qmi->txn_lock);
 	mutex_lock(&txn->lock);
@@ -354,9 +353,7 @@
 	mutex_unlock(&txn->lock);
 	mutex_unlock(&qmi->txn_lock);
 
-	if (ret < 0)
-		return ret;
-	else if (ret == 0)
+	if (ret == 0)
 		return -ETIMEDOUT;
 	else
 		return txn->result;
@@ -480,6 +477,9 @@
 	struct qmi_txn *txn = NULL;
 	int ret;
 
+	if (!len)
+		return;
+
 	if (len < sizeof(*hdr)) {
 		pr_err("ignoring short QMI packet\n");
 		return;
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
new file mode 100644
index 0000000..2b20e5c
--- /dev/null
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -0,0 +1,857 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <soc/qcom/qmi_rmnet.h>
+#include <soc/qcom/rmnet_qmi.h>
+#include <linux/soc/qcom/qmi.h>
+#include <linux/rtnetlink.h>
+#include <uapi/linux/rtnetlink.h>
+#include <net/pkt_sched.h>
+#include "qmi_rmnet_i.h"
+#include <trace/events/dfc.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#define NLMSG_FLOW_ACTIVATE 1
+#define NLMSG_FLOW_DEACTIVATE 2
+#define NLMSG_CLIENT_SETUP 4
+#define NLMSG_CLIENT_DELETE 5
+#define NLMSG_SCALE_FACTOR 6
+#define NLMSG_WQ_FREQUENCY 7
+
+#define FLAG_DFC_MASK 0x000F
+#define FLAG_POWERSAVE_MASK 0x0010
+#define DFC_MODE_MULTIQ 2
+
+unsigned int rmnet_wq_frequency __read_mostly = 4;
+
+#define PS_WORK_ACTIVE_BIT 0
+#define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ)
+#define NO_DELAY (0x0000 * HZ)
+
+static unsigned int qmi_rmnet_scale_factor = 5;
+
+struct qmi_elem_info data_ep_id_type_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum data_ep_type_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct data_ep_id_type_v01,
+					   ep_type),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(u32),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct data_ep_id_type_v01,
+					   iface_id),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	},
+};
+EXPORT_SYMBOL(data_ep_id_type_v01_ei);
+
+void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
+{
+	int i;
+
+	if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
+		return NULL;
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (qmi->fc_info[i].dfc_client)
+			return qmi->fc_info[i].dfc_client;
+	}
+
+	return NULL;
+}
+
+static inline int
+qmi_rmnet_has_client(struct qmi_info *qmi)
+{
+	if (qmi->wda_client)
+		return 1;
+
+	return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
+}
+
+#ifdef CONFIG_QCOM_QMI_DFC
+static void
+qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
+			  struct qos_info *qos)
+{
+	struct rmnet_bearer_map *bearer, *br_tmp;
+	struct rmnet_flow_map *itm, *fl_tmp;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry_safe(itm, fl_tmp, &qos->flow_head, list) {
+		list_del(&itm->list);
+		kfree(itm);
+	}
+
+	list_for_each_entry_safe(bearer, br_tmp, &qos->bearer_head, list) {
+		list_del(&bearer->list);
+		kfree(bearer);
+	}
+}
+
+struct rmnet_flow_map *
+qmi_rmnet_get_flow_map(struct qos_info *qos, u32 flow_id, int ip_type)
+{
+	struct rmnet_flow_map *itm;
+
+	if (!qos)
+		return NULL;
+
+	list_for_each_entry(itm, &qos->flow_head, list) {
+		if ((itm->flow_id == flow_id) && (itm->ip_type == ip_type))
+			return itm;
+	}
+	return NULL;
+}
+
+struct rmnet_bearer_map *
+qmi_rmnet_get_bearer_map(struct qos_info *qos, uint8_t bearer_id)
+{
+	struct rmnet_bearer_map *itm;
+
+	if (!qos)
+		return NULL;
+
+	list_for_each_entry(itm, &qos->bearer_head, list) {
+		if (itm->bearer_id == bearer_id)
+			return itm;
+	}
+	return NULL;
+}
+
+static void qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
+				      struct rmnet_flow_map *new_map)
+{
+	itm->bearer_id = new_map->bearer_id;
+	itm->flow_id = new_map->flow_id;
+	itm->ip_type = new_map->ip_type;
+	itm->tcm_handle = new_map->tcm_handle;
+}
+
+int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable)
+{
+	struct netdev_queue *q;
+
+	if (unlikely(tcm_handle >= dev->num_tx_queues))
+		return 0;
+
+	q = netdev_get_tx_queue(dev, tcm_handle);
+	if (unlikely(!q))
+		return 0;
+
+	if (enable)
+		netif_tx_wake_queue(q);
+	else
+		netif_tx_stop_queue(q);
+
+	return 0;
+}
+
+static int qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
+			      struct qmi_info *qmi)
+{
+	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
+	struct rmnet_flow_map new_map, *itm;
+	struct rmnet_bearer_map *bearer;
+
+	if (!qos_info)
+		return -EINVAL;
+
+	ASSERT_RTNL();
+
+	/* flow activate
+	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
+	 * tcm->tcm_ifindex - ip_type, tcm->tcm_handle - tcm_handle
+	 */
+
+	new_map.bearer_id = tcm->tcm__pad1;
+	new_map.flow_id = tcm->tcm_parent;
+	new_map.ip_type = tcm->tcm_ifindex;
+	new_map.tcm_handle = tcm->tcm_handle;
+	trace_dfc_flow_info(dev->name, new_map.bearer_id, new_map.flow_id,
+			    new_map.ip_type, new_map.tcm_handle, 1);
+
+	spin_lock_bh(&qos_info->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
+				     new_map.ip_type);
+	if (itm) {
+		qmi_rmnet_update_flow_map(itm, &new_map);
+	} else {
+		itm = kzalloc(sizeof(*itm), GFP_ATOMIC);
+		if (!itm) {
+			spin_unlock_bh(&qos_info->qos_lock);
+			return -ENOMEM;
+		}
+
+		qmi_rmnet_update_flow_map(itm, &new_map);
+		list_add(&itm->list, &qos_info->flow_head);
+
+		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
+		if (bearer) {
+			bearer->flow_ref++;
+		} else {
+			bearer = kzalloc(sizeof(*bearer), GFP_ATOMIC);
+			if (!bearer) {
+				spin_unlock_bh(&qos_info->qos_lock);
+				return -ENOMEM;
+			}
+
+			bearer->bearer_id = new_map.bearer_id;
+			bearer->flow_ref = 1;
+			bearer->grant_size = qos_info->default_grant;
+			bearer->grant_thresh =
+				qmi_rmnet_grant_per(bearer->grant_size);
+			qos_info->default_grant = DEFAULT_GRANT;
+			list_add(&bearer->list, &qos_info->bearer_head);
+		}
+
+		qmi_rmnet_flow_control(dev, itm->tcm_handle,
+				bearer->grant_size > 0 ? 1 : 0);
+
+		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
+				 bearer->grant_size, 0, itm->tcm_handle, 1);
+	}
+
+	spin_unlock_bh(&qos_info->qos_lock);
+
+	return 0;
+}
+
+static int
+qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi)
+{
+	struct qos_info *qos_info = (struct qos_info *)rmnet_get_qos_pt(dev);
+	struct rmnet_flow_map new_map, *itm;
+	struct rmnet_bearer_map *bearer;
+
+	if (!qos_info)
+		return -EINVAL;
+
+	ASSERT_RTNL();
+
+	/* flow deactivate
+	 * tcm->tcm__pad1 - bearer_id, tcm->tcm_parent - flow_id,
+	 * tcm->tcm_ifindex - ip_type
+	 */
+
+	spin_lock_bh(&qos_info->qos_lock);
+
+	new_map.bearer_id = tcm->tcm__pad1;
+	new_map.flow_id = tcm->tcm_parent;
+	new_map.ip_type = tcm->tcm_ifindex;
+	itm = qmi_rmnet_get_flow_map(qos_info, new_map.flow_id,
+				     new_map.ip_type);
+	if (itm) {
+		trace_dfc_flow_info(dev->name, new_map.bearer_id,
+				    new_map.flow_id, new_map.ip_type,
+				    itm->tcm_handle, 0);
+		list_del(&itm->list);
+
+		/* Enable flow to allow new call setup */
+		qmi_rmnet_flow_control(dev, itm->tcm_handle, 1);
+		trace_dfc_qmi_tc(dev->name, itm->bearer_id, itm->flow_id,
+				 0, 0, itm->tcm_handle, 1);
+
+		/* clear bearer map */
+		bearer = qmi_rmnet_get_bearer_map(qos_info, new_map.bearer_id);
+		if (bearer && --bearer->flow_ref == 0) {
+			list_del(&bearer->list);
+			kfree(bearer);
+		}
+
+		kfree(itm);
+	}
+
+	if (list_empty(&qos_info->flow_head)) {
+		netif_tx_wake_all_queues(dev);
+		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
+	}
+
+	spin_unlock_bh(&qos_info->qos_lock);
+
+	return 0;
+}
+#else
+static inline void
+qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
+			   struct rmnet_flow_map *itm, int add_flow)
+{
+}
+
+static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
+{
+}
+
+static inline void
+qmi_rmnet_update_flow_map(struct rmnet_flow_map *itm,
+			  struct rmnet_flow_map *new_map)
+{
+}
+
+static inline int
+qmi_rmnet_add_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline int
+qmi_rmnet_del_flow(struct net_device *dev, struct tcmsg *tcm,
+		   struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+#endif
+
+static int
+qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
+{
+	int idx, rc, err = 0;
+
+	ASSERT_RTNL();
+
+	/* client setup
+	 * tcm->tcm_handle - instance, tcm->tcm_info - ep_type,
+	 * tcm->tcm_parent - iface_id, tcm->tcm_ifindex - flags
+	 */
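+	/* Two client slots: instance 0 maps to index 0, any other to index 1 */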
+	idx = (tcm->tcm_handle == 0) ? 0 : 1;
+
+	if (!qmi) {
+		qmi = kzalloc(sizeof(struct qmi_info), GFP_KERNEL);
+		if (!qmi)
+			return -ENOMEM;
+
+		rmnet_init_qmi_pt(port, qmi);
+	}
+
+	qmi->flag = tcm->tcm_ifindex;
+	qmi->fc_info[idx].svc.instance = tcm->tcm_handle;
+	qmi->fc_info[idx].svc.ep_type = tcm->tcm_info;
+	qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent;
+
+	if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
+	    (qmi->fc_info[idx].dfc_client == NULL)) {
+		rc = dfc_qmi_client_init(port, idx, qmi);
+		if (rc < 0)
+			err = rc;
+	}
+
+	if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
+	    (idx == 0) && (qmi->wda_client == NULL)) {
+		rc = wda_qmi_client_init(port, tcm->tcm_handle);
+		if (rc < 0)
+			err = rc;
+	}
+
+	return err;
+}
+
+static int
+__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
+{
+	ASSERT_RTNL();
+
+	if (qmi->fc_info[idx].dfc_client) {
+		dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client);
+		qmi->fc_info[idx].dfc_client = NULL;
+	}
+
+	if (!qmi_rmnet_has_client(qmi)) {
+		rmnet_reset_qmi_pt(port);
+		kfree(qmi);
+		return 0;
+	}
+
+	return 1;
+}
+
+static void
+qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
+{
+	int idx;
+
+	/* client delete: tcm->tcm_handle - instance*/
+	idx = (tcm->tcm_handle == 0) ? 0 : 1;
+
+	ASSERT_RTNL();
+
+	if ((idx == 0) && qmi->wda_client) {
+		wda_qmi_client_exit(qmi->wda_client);
+		qmi->wda_client = NULL;
+	}
+
+	__qmi_rmnet_delete_client(port, qmi, idx);
+}
+
+void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
+{
+	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	struct tcmsg *tcm = (struct tcmsg *)tcm_pt;
+
+	switch (tcm->tcm_family) {
+	case NLMSG_FLOW_ACTIVATE:
+		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) ||
+		    !qmi_rmnet_has_dfc_client(qmi))
+			return;
+
+		qmi_rmnet_add_flow(dev, tcm, qmi);
+		break;
+	case NLMSG_FLOW_DEACTIVATE:
+		if (!qmi || ((qmi->flag & FLAG_DFC_MASK) != DFC_MODE_MULTIQ))
+			return;
+
+		qmi_rmnet_del_flow(dev, tcm, qmi);
+		break;
+	case NLMSG_CLIENT_SETUP:
+		if (((tcm->tcm_ifindex & FLAG_DFC_MASK) != DFC_MODE_MULTIQ) &&
+		    !(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
+			return;
+
+		if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
+			if (!qmi_rmnet_has_client(qmi)) {
+				kfree(qmi);
+				rmnet_reset_qmi_pt(port);
+			}
+		}
+		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+			qmi_rmnet_work_init(port);
+			rmnet_set_powersave_format(port);
+		}
+		break;
+	case NLMSG_CLIENT_DELETE:
+		if (!qmi)
+			return;
+		if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+			rmnet_clear_powersave_format(port);
+			qmi_rmnet_work_exit(port);
+		}
+		qmi_rmnet_delete_client(port, qmi, tcm);
+		break;
+	case NLMSG_SCALE_FACTOR:
+		if (!tcm->tcm_index)
+			return;
+		qmi_rmnet_scale_factor = tcm->tcm_index;
+		break;
+	case NLMSG_WQ_FREQUENCY:
+		rmnet_wq_frequency = tcm->tcm_index;
+		break;
+	default:
+		pr_debug("%s(): No handler\n", __func__);
+		break;
+	}
+}
+EXPORT_SYMBOL(qmi_rmnet_change_link);
+
+void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
+{
+	struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
+	int i;
+
+	if (!qmi)
+		return;
+
+	ASSERT_RTNL();
+
+	qmi_rmnet_work_exit(port);
+
+	if (qmi->wda_client) {
+		wda_qmi_client_exit(qmi->wda_client);
+		qmi->wda_client = NULL;
+	}
+
+	for (i = 0; i < MAX_CLIENT_NUM; i++) {
+		if (!__qmi_rmnet_delete_client(port, qmi, i))
+			return;
+	}
+}
+EXPORT_SYMBOL(qmi_rmnet_qmi_exit);
+
+void qmi_rmnet_enable_all_flows(struct net_device *dev)
+{
+	struct qos_info *qos;
+	struct rmnet_bearer_map *bearer;
+	int do_wake = 0;
+
+	qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+	if (!qos)
+		return;
+
+	spin_lock_bh(&qos->qos_lock);
+
+	list_for_each_entry(bearer, &qos->bearer_head, list) {
+		bearer->grant_before_ps = bearer->grant_size;
+		bearer->seq_before_ps = bearer->seq;
+		bearer->grant_size = DEFAULT_GRANT;
+		bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
+		bearer->seq = 0;
+		bearer->ack_req = 0;
+		bearer->ancillary = 0;
+		do_wake = 1;
+	}
+
+	if (do_wake) {
+		netif_tx_wake_all_queues(dev);
+		trace_dfc_qmi_tc(dev->name, 0xFF, 0, DEFAULT_GRANT, 0, 0, 1);
+	}
+
+	spin_unlock_bh(&qos->qos_lock);
+}
+EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);
+
+#ifdef CONFIG_QCOM_QMI_DFC
+void qmi_rmnet_burst_fc_check(struct net_device *dev,
+			      int ip_type, u32 mark, unsigned int len)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+
+	if (!qos)
+		return;
+
+	dfc_qmi_burst_check(dev, qos, ip_type, mark, len);
+}
+EXPORT_SYMBOL(qmi_rmnet_burst_fc_check);
+
+int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	struct qos_info *qos = rmnet_get_qos_pt(dev);
+	int txq = 0, ip_type = AF_INET;
+	unsigned int len = skb->len;
+	struct rmnet_flow_map *itm;
+	u32 mark = skb->mark;
+
+	if (!qos)
+		return 0;
+
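+	/* Unmarked TCP ACKs carrying no payload are steered to queue 1 */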
+	switch (skb->protocol) {
+	/* TCPv4 ACKs */
+	case htons(ETH_P_IP):
+		ip_type = AF_INET;
+		if ((!mark) &&
+		    (ip_hdr(skb)->protocol == IPPROTO_TCP) &&
+		    (len == 40 || len == 52) &&
+		    (ip_hdr(skb)->ihl == 5) &&
+		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+			return 1;
+		break;
+
+	/* TCPv6 ACKs */
+	case htons(ETH_P_IPV6):
+		ip_type = AF_INET6;
+		if ((!mark) &&
+		    (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) &&
+		    (len == 60 || len == 72) &&
+		    ((tcp_flag_word(tcp_hdr(skb)) & 0xFF00) == TCP_FLAG_ACK))
+			return 1;
+		/* Fall through */
+	}
+
+	/* Default flows */
+	if (!mark)
+		return 0;
+
+	/* Dedicated flows */
+	spin_lock_bh(&qos->qos_lock);
+
+	itm = qmi_rmnet_get_flow_map(qos, mark, ip_type);
+	if (unlikely(!itm))
+		goto done;
+
+	txq = itm->tcm_handle;
+
+done:
+	spin_unlock_bh(&qos->qos_lock);
+	return txq;
+}
+EXPORT_SYMBOL(qmi_rmnet_get_queue);
+
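+/* The grant threshold is a fixed fraction of the grant; e.g. with the
+ * default scale factor of 5, DEFAULT_GRANT (10240 bytes) gives a
+ * 2048-byte threshold.
+ */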
+inline unsigned int qmi_rmnet_grant_per(unsigned int grant)
+{
+	return grant / qmi_rmnet_scale_factor;
+}
+EXPORT_SYMBOL(qmi_rmnet_grant_per);
+
+void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
+{
+	struct qos_info *qos;
+
+	qos = kmalloc(sizeof(*qos), GFP_KERNEL);
+	if (!qos)
+		return NULL;
+
+	qos->mux_id = mux_id;
+	qos->real_dev = real_dev;
+	qos->default_grant = DEFAULT_GRANT;
+	qos->tran_num = 0;
+	INIT_LIST_HEAD(&qos->flow_head);
+	INIT_LIST_HEAD(&qos->bearer_head);
+	spin_lock_init(&qos->qos_lock);
+
+	return qos;
+}
+EXPORT_SYMBOL(qmi_rmnet_qos_init);
+
+void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
+{
+	void *port = rmnet_get_rmnet_port(dev);
+	struct qmi_info *qmi = rmnet_get_qmi_pt(port);
+	struct qos_info *qos_info = (struct qos_info *)qos;
+
+	if (!qmi || !qos)
+		return;
+
+	qmi_rmnet_clean_flow_list(qmi, dev, qos_info);
+	kfree(qos);
+}
+EXPORT_SYMBOL(qmi_rmnet_qos_exit);
+#endif
+
+#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
+static struct workqueue_struct  *rmnet_ps_wq;
+static struct rmnet_powersave_work *rmnet_work;
+static struct list_head ps_list;
+
+struct rmnet_powersave_work {
+	struct delayed_work work;
+	void *port;
+	u64 old_rx_pkts;
+	u64 old_tx_pkts;
+};
+
+void qmi_rmnet_ps_on_notify(void *port)
+{
+	struct qmi_rmnet_ps_ind *tmp;
+
+	list_for_each_entry(tmp, &ps_list, list)
+		tmp->ps_on_handler(port);
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);
+
+void qmi_rmnet_ps_off_notify(void *port)
+{
+	struct qmi_rmnet_ps_ind *tmp;
+
+	list_for_each_entry(tmp, &ps_list, list)
+		tmp->ps_off_handler(port);
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);
+
+int qmi_rmnet_ps_ind_register(void *port,
+			      struct qmi_rmnet_ps_ind *ps_ind)
+{
+	if (!port || !ps_ind || !ps_ind->ps_on_handler ||
+	    !ps_ind->ps_off_handler)
+		return -EINVAL;
+
+	list_add_rcu(&ps_ind->list, &ps_list);
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_ind_register);
+
+int qmi_rmnet_ps_ind_deregister(void *port,
+				struct qmi_rmnet_ps_ind *ps_ind)
+{
+	struct qmi_rmnet_ps_ind *tmp;
+
+	if (!port || !ps_ind)
+		return -EINVAL;
+
+	list_for_each_entry(tmp, &ps_list, list) {
+		if (tmp == ps_ind) {
+			list_del_rcu(&ps_ind->list);
+			goto done;
+		}
+	}
+
+done:
+	return 0;
+}
+EXPORT_SYMBOL(qmi_rmnet_ps_ind_deregister);
+
+int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
+{
+	int rc = -EINVAL;
+	struct qmi_info *qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+
+	if (!qmi || !qmi->wda_client)
+		return rc;
+
+	rc = wda_set_powersave_mode(qmi->wda_client, enable);
+	if (rc < 0) {
+		pr_err("%s() failed set powersave mode[%u], err=%d\n",
+			__func__, enable, rc);
+		return rc;
+	}
+
+	if (enable)
+		dfc_qmi_wq_flush(qmi);
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
+
+void qmi_rmnet_work_restart(void *port)
+{
+	if (!rmnet_ps_wq || !rmnet_work)
+		return;
+	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
+}
+EXPORT_SYMBOL(qmi_rmnet_work_restart);
+
+static void qmi_rmnet_check_stats(struct work_struct *work)
+{
+	struct rmnet_powersave_work *real_work;
+	struct qmi_info *qmi;
+	u64 rxd, txd;
+	u64 rx, tx;
+
+	real_work = container_of(to_delayed_work(work),
+				 struct rmnet_powersave_work, work);
+
+	if (unlikely(!real_work || !real_work->port))
+		return;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(real_work->port);
+	if (unlikely(!qmi))
+		return;
+
+	if (qmi->ps_enabled) {
+		/* Disable powersave mode to resume UL grants.
+		 * Retry after a small delay on qmi error.
+		 */
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
+			queue_delayed_work(rmnet_ps_wq,
+					   &real_work->work, HZ / 50);
+			return;
+		}
+		qmi->ps_enabled = 0;
+
+		if (rmnet_get_powersave_notif(real_work->port))
+			qmi_rmnet_ps_off_notify(real_work->port);
+
+		goto end;
+	}
+
+	rmnet_get_packets(real_work->port, &rx, &tx);
+	rxd = rx - real_work->old_rx_pkts;
+	txd = tx - real_work->old_tx_pkts;
+	real_work->old_rx_pkts = rx;
+	real_work->old_tx_pkts = tx;
+
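+	/* No traffic in the last interval: try to enter powersave mode */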
+	if (!rxd && !txd) {
+		if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
+			queue_delayed_work(rmnet_ps_wq,
+					   &real_work->work, PS_INTERVAL);
+			return;
+		}
+		qmi->ps_enabled = 1;
+		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+
+		/* Enable flows after clearing the bit so a new
+		 * work can be triggered.
+		 */
+		rmnet_enable_all_flows(real_work->port);
+
+		if (rmnet_get_powersave_notif(real_work->port))
+			qmi_rmnet_ps_on_notify(real_work->port);
+
+		return;
+	}
+end:
+	queue_delayed_work(rmnet_ps_wq, &real_work->work, PS_INTERVAL);
+}
+
+static void qmi_rmnet_work_set_active(void *port, int status)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return;
+
+	if (status)
+		set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+	else
+		clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+}
+
+void qmi_rmnet_work_init(void *port)
+{
+	if (rmnet_ps_wq)
+		return;
+
+	rmnet_ps_wq = alloc_workqueue("rmnet_powersave_work",
+					WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+
+	rmnet_work = kmalloc(sizeof(*rmnet_work), GFP_ATOMIC);
+	if (!rmnet_work) {
+		destroy_workqueue(rmnet_ps_wq);
+		rmnet_ps_wq = NULL;
+		return;
+	}
+	INIT_LIST_HEAD(&ps_list);
+	INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
+	rmnet_work->port = port;
+	rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
+			  &rmnet_work->old_tx_pkts);
+
+	qmi_rmnet_work_set_active(rmnet_work->port, 1);
+	queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, PS_INTERVAL);
+}
+EXPORT_SYMBOL(qmi_rmnet_work_init);
+
+void qmi_rmnet_work_maybe_restart(void *port)
+{
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+	if (unlikely(!qmi))
+		return;
+
+	if (!test_and_set_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active))
+		qmi_rmnet_work_restart(port);
+}
+EXPORT_SYMBOL(qmi_rmnet_work_maybe_restart);
+
+void qmi_rmnet_work_exit(void *port)
+{
+	if (!rmnet_ps_wq || !rmnet_work)
+		return;
+	cancel_delayed_work_sync(&rmnet_work->work);
+	destroy_workqueue(rmnet_ps_wq);
+	qmi_rmnet_work_set_active(port, 0);
+	rmnet_ps_wq = NULL;
+	kfree(rmnet_work);
+	rmnet_work = NULL;
+}
+EXPORT_SYMBOL(qmi_rmnet_work_exit);
+#endif
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
new file mode 100644
index 0000000..9232a59
--- /dev/null
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _RMNET_QMI_I_H
+#define _RMNET_QMI_I_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+#define IP_VER_4 4
+#define IP_VER_6 6
+
+#define MAX_CLIENT_NUM 2
+#define MAX_FLOW_NUM 32
+#define DEFAULT_GRANT 10240
+
+struct rmnet_flow_map {
+	struct list_head list;
+	u8 bearer_id;
+	u32 flow_id;
+	int ip_type;
+	u32 tcm_handle;
+};
+
+struct rmnet_bearer_map {
+	struct list_head list;
+	u8 bearer_id;
+	int flow_ref;
+	u32 grant_size;
+	u32 grant_thresh;
+	u16 seq;
+	u8  ack_req;
+	u32 grant_before_ps;
+	u16 seq_before_ps;
+	u32 ancillary;
+};
+
+struct svc_info {
+	u32 instance;
+	u32 ep_type;
+	u32 iface_id;
+};
+
+struct fc_info {
+	struct svc_info svc;
+	void *dfc_client;
+};
+
+struct qos_info {
+	u8 mux_id;
+	struct net_device *real_dev;
+	struct list_head flow_head;
+	struct list_head bearer_head;
+	u32 default_grant;
+	u32 tran_num;
+	spinlock_t qos_lock;
+};
+
+struct flow_info {
+	struct net_device *dev;
+	struct rmnet_flow_map *itm;
+};
+
+struct qmi_info {
+	int flag;
+	void *wda_client;
+	struct fc_info fc_info[MAX_CLIENT_NUM];
+	unsigned long ps_work_active;
+	int ps_enabled;
+};
+
+enum data_ep_type_enum_v01 {
+	DATA_EP_TYPE_ENUM_MIN_ENUM_VAL_V01 = INT_MIN,
+	DATA_EP_TYPE_RESERVED_V01 = 0x00,
+	DATA_EP_TYPE_HSIC_V01 = 0x01,
+	DATA_EP_TYPE_HSUSB_V01 = 0x02,
+	DATA_EP_TYPE_PCIE_V01 = 0x03,
+	DATA_EP_TYPE_EMBEDDED_V01 = 0x04,
+	DATA_EP_TYPE_ENUM_MAX_ENUM_VAL_V01 = INT_MAX
+};
+
+struct data_ep_id_type_v01 {
+	enum data_ep_type_enum_v01 ep_type;
+	u32 iface_id;
+};
+
+extern struct qmi_elem_info data_ep_id_type_v01_ei[];
+
+void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi);
+
+#ifdef CONFIG_QCOM_QMI_DFC
+struct rmnet_flow_map *
+qmi_rmnet_get_flow_map(struct qos_info *qos_info,
+		       u32 flow_id, int ip_type);
+
+struct rmnet_bearer_map *
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id);
+
+unsigned int qmi_rmnet_grant_per(unsigned int grant);
+
+int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);
+
+void dfc_qmi_client_exit(void *dfc_data);
+
+void dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
+			 int ip_type, u32 mark, unsigned int len);
+
+int qmi_rmnet_flow_control(struct net_device *dev, u32 tcm_handle, int enable);
+
+void dfc_qmi_wq_flush(struct qmi_info *qmi);
+
+#else
+static inline struct rmnet_flow_map *
+qmi_rmnet_get_flow_map(struct qos_info *qos_info,
+		       uint32_t flow_id, int ip_type)
+{
+	return NULL;
+}
+
+static inline struct rmnet_bearer_map *
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, uint8_t bearer_id)
+{
+	return NULL;
+}
+
+static inline int
+dfc_qmi_client_init(void *port, int modem, struct qmi_info *qmi)
+{
+	return -EINVAL;
+}
+
+static inline void dfc_qmi_client_exit(void *dfc_data)
+{
+}
+
+static inline void
+dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
+		    int ip_type, u32 mark, unsigned int len)
+{
+}
+
+static inline void
+dfc_qmi_wq_flush(struct qmi_info *qmi)
+{
+}
+#endif
+
+#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
+int wda_qmi_client_init(void *port, uint32_t instance);
+void wda_qmi_client_exit(void *wda_data);
+int wda_set_powersave_mode(void *wda_data, uint8_t enable);
+#else
+static inline int wda_qmi_client_init(void *port, uint32_t instance)
+{
+	return -EINVAL;
+}
+
+static inline void wda_qmi_client_exit(void *wda_data)
+{
+}
+
+static inline int wda_set_powersave_mode(void *wda_data, uint8_t enable)
+{
+	return -EINVAL;
+}
+#endif
+#endif /*_RMNET_QMI_I_H*/
diff --git a/drivers/soc/qcom/qsee_ipc_irq.c b/drivers/soc/qcom/qsee_ipc_irq.c
new file mode 100644
index 0000000..e25b99e
--- /dev/null
+++ b/drivers/soc/qcom/qsee_ipc_irq.c
@@ -0,0 +1,358 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MAX_BANK_IRQ 32
+
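+/* hwirq encoding: hwirq = bank_index * MAX_BANK_IRQ + bit_within_bank */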
+#define hwirq_to_index(_irq) (_irq / MAX_BANK_IRQ)
+#define hwirq_to_bit(_irq) (_irq % MAX_BANK_IRQ)
+#define to_hwirq(_index, _bit) ((_index * MAX_BANK_IRQ) + _bit)
+
+struct qsee_irq_data {
+	const char *name;
+	u32 status;
+	u32 clear;
+	u32 mask;
+	u32 msb;
+};
+
+struct qsee_irq_bank {
+	const struct qsee_irq_data *data;
+	int irq;
+
+	DECLARE_BITMAP(irq_enabled, MAX_BANK_IRQ);
+	DECLARE_BITMAP(irq_rising, MAX_BANK_IRQ);
+	DECLARE_BITMAP(irq_falling, MAX_BANK_IRQ);
+};
+
+struct qsee_irq {
+	struct device *dev;
+
+	struct irq_domain *domain;
+	struct regmap *regmap;
+
+	int num_banks;
+	struct qsee_irq_bank *banks;
+};
+
+/**
+ * qsee_intr() - interrupt handler for incoming notifications
+ * @irq:	IRQ line that fired; used to look up the owning bank
+ * @data:	qsee driver context
+ *
+ * Dispatch each pending, enabled status bit of the owning bank to its mapped
+ * virtual interrupt, then clear it in the control registers.
+ */
+static irqreturn_t qsee_intr(int irq, void *data)
+{
+	struct qsee_irq *qirq = data;
+	struct qsee_irq_bank *bank = NULL;
+	struct irq_desc *desc;
+	int irq_pin;
+	u32 status;
+	u32 mask;
+	int i;
+	int j;
+
+	for (i = 0; i < qirq->num_banks; i++) {
+		if (qirq->banks[i].irq == irq) {
+			bank = &qirq->banks[i];
+			break;
+		}
+	}
+	if (!bank) {
+		dev_err(qirq->dev, "Unable to find bank for irq:%d\n", irq);
+		return IRQ_HANDLED;
+	}
+
+	if (regmap_read(qirq->regmap, bank->data->status, &status)) {
+		dev_err(qirq->dev, "Error reading irq %d status\n", irq);
+		return IRQ_HANDLED;
+	}
+	if (regmap_read(qirq->regmap, bank->data->mask, &mask)) {
+		dev_err(qirq->dev, "Error reading irq %d mask\n", irq);
+		return IRQ_HANDLED;
+	}
+
+	for_each_set_bit(j, bank->irq_enabled, bank->data->msb) {
+		if (!(status & BIT(j)))
+			continue;
+
+		irq_pin = irq_find_mapping(qirq->domain, to_hwirq(i, j));
+		desc = irq_to_desc(irq_pin);
+		if (desc)
+			handle_simple_irq(desc);
+		regmap_write(qirq->regmap, bank->data->clear, BIT(j));
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void qsee_mask_irq(struct irq_data *irqd)
+{
+	struct qsee_irq *qirq = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+	struct qsee_irq_bank *bank;
+	int index;
+	u32 mask;
+	int bit;
+
+	index = hwirq_to_index(irq);
+	bit = hwirq_to_bit(irq);
+	bank = &qirq->banks[index];
+
+	regmap_read(qirq->regmap, bank->data->mask, &mask);
+	mask |= BIT(bit);
+	regmap_write(qirq->regmap, bank->data->mask, mask);
+
+	clear_bit(bit, bank->irq_enabled);
+}
+
+static void qsee_unmask_irq(struct irq_data *irqd)
+{
+	struct qsee_irq *qirq = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+	struct qsee_irq_bank *bank;
+	int index;
+	u32 mask;
+	int bit;
+
+	index = hwirq_to_index(irq);
+	bit = hwirq_to_bit(irq);
+	bank = &qirq->banks[index];
+
+	regmap_read(qirq->regmap, bank->data->mask, &mask);
+	mask &= ~(BIT(bit));
+	regmap_write(qirq->regmap, bank->data->mask, mask);
+
+	set_bit(bit, bank->irq_enabled);
+}
+
+static int qsee_set_irq_type(struct irq_data *irqd, unsigned int type)
+{
+	struct qsee_irq *qirq = irq_data_get_irq_chip_data(irqd);
+	irq_hw_number_t irq = irqd_to_hwirq(irqd);
+	struct qsee_irq_bank *bank;
+	int index;
+	int bit;
+
+	index = hwirq_to_index(irq);
+	bit = hwirq_to_bit(irq);
+	bank = &qirq->banks[index];
+
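+	/* Only edge-triggered types are supported */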
+	if (!(type & IRQ_TYPE_EDGE_BOTH))
+		return -EINVAL;
+
+	if (type & IRQ_TYPE_EDGE_RISING)
+		set_bit(bit, bank->irq_rising);
+	else
+		clear_bit(bit, bank->irq_rising);
+
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		set_bit(bit, bank->irq_falling);
+	else
+		clear_bit(bit, bank->irq_falling);
+
+	return 0;
+}
+
+static struct irq_chip qsee_irq_chip = {
+	.name           = "qsee",
+	.irq_mask       = qsee_mask_irq,
+	.irq_unmask     = qsee_unmask_irq,
+	.irq_set_type	= qsee_set_irq_type,
+};
+
+static int qsee_irq_map(struct irq_domain *d,
+			 unsigned int irq,
+			 irq_hw_number_t hw)
+{
+	struct qsee_irq *qirq = d->host_data;
+
+	irq_set_chip_and_handler(irq, &qsee_irq_chip, handle_level_irq);
+	irq_set_chip_data(irq, qirq);
+	irq_set_noprobe(irq);
+
+	return 0;
+}
+
+static int qsee_irq_xlate_threecell(struct irq_domain *d,
+				    struct device_node *ctrlr,
+				    const u32 *intspec,
+				    unsigned int intsize,
+				    irq_hw_number_t *out_hwirq,
+				    unsigned int *out_type)
+{
+	struct qsee_irq *qirq = d->host_data;
+	u32 index, bit;
+
+	if (WARN_ON(intsize < 3))
+		return -EINVAL;
+
+	index = intspec[0];
+	if (WARN_ON(index >= qirq->num_banks))
+		return -EINVAL;
+
+	bit = intspec[1];
+	if (WARN_ON(bit >= qirq->banks[index].data->msb))
+		return -EINVAL;
+
+	*out_hwirq = (index * MAX_BANK_IRQ) + bit;
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+
+	return 0;
+}
+
+static const struct irq_domain_ops qsee_irq_ops = {
+	.map = qsee_irq_map,
+	.xlate = qsee_irq_xlate_threecell,
+};
+
+static int qsee_irq_probe(struct platform_device *pdev)
+{
+	const struct qsee_irq_data *data;
+	struct device *dev = &pdev->dev;
+	struct qsee_irq_bank *bank;
+	struct device_node *syscon;
+	struct qsee_irq *qirq;
+	int irq_count;
+	u32 mask;
+	int idx;
+	int ret;
+	int i;
+
+	qirq = devm_kzalloc(dev, sizeof(*qirq), GFP_KERNEL);
+	if (!qirq)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, qirq);
+	qirq->dev = dev;
+
+	syscon = of_parse_phandle(dev->of_node, "syscon", 0);
+	if (!syscon) {
+		dev_err(dev, "no syscon node\n");
+		return -ENODEV;
+	}
+
+	qirq->regmap = syscon_node_to_regmap(syscon);
+	if (IS_ERR(qirq->regmap))
+		return PTR_ERR(qirq->regmap);
+
+	data = (struct qsee_irq_data *)of_device_get_match_data(dev);
+	if (!data)
+		return -ENODEV;
+
+	irq_count = platform_irq_count(pdev);
+	qirq->banks = devm_kzalloc(dev, sizeof(*qirq->banks) * irq_count,
+				   GFP_KERNEL);
+	if (!qirq->banks)
+		return -ENOMEM;
+
+	qirq->num_banks = irq_count;
+	for (i = 0; data[i].name; i++) {
+		idx = of_property_match_string(dev->of_node, "interrupt-names",
+					       data[i].name);
+		if (idx < 0)
+			return -EINVAL;
+
+		bank = &qirq->banks[idx];
+		bank->data = &data[i];
+		bank->irq = platform_get_irq(pdev, idx);
+		if (bank->irq < 0) {
+			dev_err(dev, "unable to acquire %s interrupt\n",
+				data[i].name);
+			return -EINVAL;
+		}
+
+		/* Mask all interrupts until client registers */
+		mask = (1 << bank->data->msb) - 1;
+		regmap_write(qirq->regmap, bank->data->mask, mask);
+
+		ret = devm_request_irq(dev, bank->irq, qsee_intr,
+				       IRQF_NO_SUSPEND, "qsee_irq", qirq);
+		if (ret) {
+			dev_err(dev, "failed to request interrupt\n");
+			return ret;
+		}
+	}
+
+	qirq->domain = irq_domain_add_linear(dev->of_node, 32 * irq_count,
+						 &qsee_irq_ops, qirq);
+	if (!qirq->domain) {
+		dev_err(dev, "failed to add irq_domain\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int qsee_irq_remove(struct platform_device *pdev)
+{
+	struct qsee_irq *qirq = platform_get_drvdata(pdev);
+
+	irq_domain_remove(qirq->domain);
+
+	return 0;
+}
+
+static const struct qsee_irq_data qsee_irq_data_init[] = {
+	{
+		.name = "sp_ipc0",
+		.status = 0x6000,
+		.clear = 0x6008,
+		.mask = 0x601C,
+		.msb = 4,
+	},
+	{
+		.name = "sp_ipc1",
+		.status = 0x8000,
+		.clear = 0x8008,
+		.mask = 0x801C,
+		.msb = 4,
+	},
+	{},
+};
+
+static const struct of_device_id qsee_irq_of_match[] = {
+	{ .compatible = "qcom,sm8150-qsee-irq", .data = &qsee_irq_data_init},
+	{ .compatible = "qcom,kona-qsee-irq", .data = &qsee_irq_data_init},
+	{},
+};
+MODULE_DEVICE_TABLE(of, qsee_irq_of_match);
+
+static struct platform_driver qsee_irq_driver = {
+	.probe = qsee_irq_probe,
+	.remove = qsee_irq_remove,
+	.driver = {
+			.name = "qsee_irq",
+			.of_match_table = qsee_irq_of_match,
+	},
+};
+
+static int __init qsee_irq_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&qsee_irq_driver);
+	if (rc)
+		pr_err("%s: platform driver reg failed %d\n", __func__, rc);
+
+	return rc;
+}
+postcore_initcall(qsee_irq_init);
+
+MODULE_DESCRIPTION("QTI Secure Execution Environment IRQ driver");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/soc/qcom/qsee_ipc_irq_bridge.c b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
new file mode 100644
index 0000000..8add82b
--- /dev/null
+++ b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
@@ -0,0 +1,624 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/cdev.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#define MODULE_NAME "qsee_ipc_irq_bridge"
+#define DEVICE_NAME MODULE_NAME
+#define NUM_LOG_PAGES 4
+
+#define QIIB_DBG(x...) do { \
+	if (qiib_info->log_ctx) \
+		ipc_log_string(qiib_info->log_ctx, x); \
+	else \
+		pr_debug(x); \
+	} while (0)
+
+#define QIIB_ERR(x...) do { \
+	pr_err(x); \
+	if (qiib_info->log_ctx) \
+		ipc_log_string(qiib_info->log_ctx, x); \
+	} while (0)
+
+static void qiib_cleanup(void);
+
+/**
+ * qiib_dev - QSEE IPC IRQ bridge device
+ * @dev_list:		qiib device list.
+ * @i:			Index to this character device.
+ * @dev_name:		Device node name used by the clients.
+ * @cdev:		structure to the internal character device.
+ * @devicep:		Pointer to the qiib class device structure.
+ * @poll_wait_queue:	poll thread wait queue.
+ * @irq_line:		IRQ number used for this device.
+ * @rx_irq_reset_reg:	Reference to the register to reset the rx irq
+ *			line, if applicable.
+ * @irq_mask:		Mask written to @rx_irq_reset_reg to clear the irq.
+ * @irq_pending_count:	The number of IRQs pending.
+ * @irq_pending_count_lock: Lock to protect @irq_pending_count.
+ * @ssr_name:		Name of the subsystem recognized by the SSR framework.
+ * @nb:			SSR Notifier callback.
+ * @notifier_handle:	SSR Notifier handle.
+ * @in_reset:		Flag to check the SSR state.
+ */
+struct qiib_dev {
+	struct list_head dev_list;
+	uint32_t i;
+
+	const char *dev_name;
+	struct cdev cdev;
+	struct device *devicep;
+
+	wait_queue_head_t poll_wait_queue;
+
+	uint32_t irq_line;
+	void __iomem *rx_irq_reset_reg;
+	uint32_t irq_mask;
+	uint32_t irq_pending_count;
+	spinlock_t irq_pending_count_lock;
+
+	const char *ssr_name;
+	struct notifier_block nb;
+	void *notifier_handle;
+	bool in_reset;
+};
+
+/**
+ * qiib_driver_data - QSEE IPC IRQ bridge driver data
+ * @list:		list of all nodes devices.
+ * @list_lock:		lock to synchronize the @list access.
+ * @nports:		Number of device nodes.
+ * @classp:		Pointer to the device class.
+ * @dev_num:		qiib device number.
+ * @log_ctx:		pointer to the ipc logging context.
+ */
+struct qiib_driver_data {
+	struct list_head list;
+	struct mutex list_lock;
+
+	int nports;
+	struct class *classp;
+	dev_t dev_num;
+
+	void *log_ctx;
+};
+
+static struct qiib_driver_data *qiib_info;
+
+/**
+ * qiib_driver_data_init() - Initialize the QIIB driver data.
+ *
+ * This function initializes the driver-specific data during module init.
+ *
+ * Return:	0 for success, Standard Linux errors
+ */
+static int qiib_driver_data_init(void)
+{
+	qiib_info = kzalloc(sizeof(*qiib_info), GFP_KERNEL);
+	if (!qiib_info)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&qiib_info->list);
+	mutex_init(&qiib_info->list_lock);
+
+	qiib_info->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
+						"qsee_ipc_irq_bridge", 0);
+	if (!qiib_info->log_ctx)
+		QIIB_ERR("%s: unable to create logging context\n", __func__);
+
+	return 0;
+}
+
+/**
+ * qiib_driver_data_deinit() - De-Initialize the QIIB driver data.
+ *
+ * This function de-initializes the driver-specific data during module exit.
+ */
+static void qiib_driver_data_deinit(void)
+{
+	qiib_cleanup();
+	if (qiib_info->log_ctx)
+		ipc_log_context_destroy(qiib_info->log_ctx);
+	kfree(qiib_info);
+	qiib_info = NULL;
+}
+
+/**
+ * qiib_restart_notifier_cb() - SSR restart notifier callback function
+ * @this:	Notifier block used by the SSR framework
+ * @code:	The SSR code for which stage of restart is occurring
+ * @data:	Structure containing private data - not used here.
+ *
+ * This function is a callback for the SSR framework. From here we initiate
+ * our handling of SSR.
+ *
+ * Return: Status of SSR handling
+ */
+static int qiib_restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	struct qiib_dev *devp = container_of(this, struct qiib_dev, nb);
+
+	if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		QIIB_DBG("%s: %s: subsystem restart for %s\n", __func__,
+				"SUBSYS_BEFORE_SHUTDOWN",
+				devp->ssr_name);
+		devp->in_reset = true;
+		wake_up_interruptible(&devp->poll_wait_queue);
+	} else if (code == SUBSYS_AFTER_POWERUP) {
+		QIIB_DBG("%s: %s: subsystem restart for %s\n", __func__,
+				"SUBSYS_AFTER_POWERUP",
+				devp->ssr_name);
+		devp->in_reset = false;
+	}
+	return NOTIFY_DONE;
+}
+
+/**
+ * qiib_poll() - poll() syscall for the qiib device
+ * @file:	Pointer to the file structure.
+ * @wait:	pointer to Poll table.
+ *
+ * This function is used to poll on the qiib device when a
+ * userspace client does a poll() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ *
+ * Return: POLLIN when an interrupt has been intercepted, POLLRDHUP on SSR.
+ */
+static unsigned int qiib_poll(struct file *file, poll_table *wait)
+{
+	struct qiib_dev *devp = file->private_data;
+	unsigned int mask = 0;
+	unsigned long flags;
+
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return POLLERR;
+	}
+
+	if (devp->in_reset)
+		return POLLRDHUP;
+
+	poll_wait(file, &devp->poll_wait_queue, wait);
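+	/* Report POLLIN for any pending interrupts and reset the count */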
+	spin_lock_irqsave(&devp->irq_pending_count_lock, flags);
+	if (devp->irq_pending_count) {
+		mask |= POLLIN;
+		QIIB_DBG("%s set POLLIN on [%s] count[%d]\n",
+					__func__, devp->dev_name,
+					devp->irq_pending_count);
+		devp->irq_pending_count = 0;
+	}
+	spin_unlock_irqrestore(&devp->irq_pending_count_lock, flags);
+
+	if (devp->in_reset) {
+		mask |= POLLRDHUP;
+		QIIB_DBG("%s set POLLRDHUP on [%s] count[%d]\n",
+					__func__, devp->dev_name,
+					devp->irq_pending_count);
+	}
+	return mask;
+}
+
+/**
+ * qiib_open() - open() syscall for the qiib device
+ * @inode:	Pointer to the inode structure.
+ * @file:	Pointer to the file structure.
+ *
+ * This function is used to open the qiib device when a
+ * userspace client does an open() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ *
+ * Return:	0 for success, Standard Linux errors
+ */
+static int qiib_open(struct inode *inode, struct file *file)
+{
+	struct qiib_dev *devp = NULL;
+
+	devp = container_of(inode->i_cdev, struct qiib_dev, cdev);
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+	file->private_data = devp;
+	QIIB_DBG("%s on [%s]\n", __func__, devp->dev_name);
+	return 0;
+}
+
+/**
+ * qiib_release() - release operation on the qiib device
+ * @inode:	Pointer to the inode structure.
+ * @file:	Pointer to the file structure.
+ *
+ * This function is used to release the qiib device when a
+ * userspace client does a close() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static int qiib_release(struct inode *inode, struct file *file)
+{
+	struct qiib_dev *devp = file->private_data;
+
+	if (!devp) {
+		QIIB_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+
+	QIIB_DBG("%s on [%s]\n", __func__, devp->dev_name);
+	return 0;
+}
+
+static const struct file_operations qiib_fops = {
+	.owner = THIS_MODULE,
+	.open = qiib_open,
+	.release = qiib_release,
+	.poll = qiib_poll,
+};
+
+/**
+ * qiib_add_device() - Initialize qiib device and add cdev
+ * @devp:	pointer to the qiib device.
+ * @i:		index of the qiib device.
+ *
+ * Return:	0 for success, Standard Linux errors
+ */
+static int qiib_add_device(struct qiib_dev *devp, int i)
+{
+	int ret = 0;
+
+	devp->i = i;
+	init_waitqueue_head(&devp->poll_wait_queue);
+	spin_lock_init(&devp->irq_pending_count_lock);
+
+	cdev_init(&devp->cdev, &qiib_fops);
+	devp->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&devp->cdev, qiib_info->dev_num + i, 1);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		QIIB_ERR("%s: cdev_add() failed for dev [%s] ret:%i\n",
+			__func__, devp->dev_name, ret);
+		return ret;
+	}
+
+	devp->devicep = device_create(qiib_info->classp,
+			      NULL,
+			      (qiib_info->dev_num + i),
+			      NULL,
+			      devp->dev_name);
+
+	if (IS_ERR_OR_NULL(devp->devicep)) {
+		QIIB_ERR("%s: device_create() failed for dev [%s]\n",
+			__func__, devp->dev_name);
+		ret = -ENOMEM;
+		cdev_del(&devp->cdev);
+		return ret;
+	}
+
+	mutex_lock(&qiib_info->list_lock);
+	list_add(&devp->dev_list, &qiib_info->list);
+	mutex_unlock(&qiib_info->list_lock);
+
+	return ret;
+}
+
+static irqreturn_t qiib_irq_handler(int irq, void *priv)
+{
+	struct qiib_dev *devp = priv;
+	unsigned long flags;
+
+	spin_lock_irqsave(&devp->irq_pending_count_lock, flags);
+	devp->irq_pending_count++;
+	spin_unlock_irqrestore(&devp->irq_pending_count_lock, flags);
+	wake_up_interruptible(&devp->poll_wait_queue);
+
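+	/* For level-triggered interrupts, clear the source via the reset register */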
+	if (devp->rx_irq_reset_reg)
+		writel_relaxed(devp->irq_mask, devp->rx_irq_reset_reg);
+
+	QIIB_DBG("%s name[%s] pend_count[%d]\n", __func__,
+				devp->dev_name, devp->irq_pending_count);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * qiib_parse_node() - parse node from device tree binding
+ * @node:	pointer to device tree node
+ * @devp:	pointer to the qiib device
+ *
+ * Return:	0 on success, standard Linux errors on failure.
+ */
+static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
+{
+	const char *subsys_name;
+	const char *dev_name;
+	char *key;
+	int ret;
+
+	key = "qcom,dev-name";
+	ret = of_property_read_string(node, key, &dev_name);
+	if (ret) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		return ret;
+	}
+	QIIB_DBG("%s: %s = %s\n", __func__, key, dev_name);
+
+	key = "label";
+	ret = of_property_read_string(node, key, &subsys_name);
+	if (ret) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		return ret;
+	}
+	QIIB_DBG("%s: %s = %s\n", __func__, key, subsys_name);
+
+	devp->dev_name = dev_name;
+	devp->ssr_name = subsys_name;
+
+	return ret;
+}
+
+static int qiib_init_notifs(struct device_node *node, struct qiib_dev *devp)
+{
+	struct irq_data *irqtype_data;
+	uint32_t irq_clear[2];
+	uint32_t irqtype;
+	char *key;
+	int ret = -ENODEV;
+
+	key = "interrupts";
+	devp->irq_line = irq_of_parse_and_map(node, 0);
+	if (!devp->irq_line) {
+		QIIB_ERR("%s: missing key: %s\n", __func__, key);
+		goto missing_key;
+	}
+	QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_line);
+
+	irqtype_data = irq_get_irq_data(devp->irq_line);
+	if (!irqtype_data) {
+		QIIB_ERR("%s: get irqdata fail:%d\n", __func__, devp->irq_line);
+		goto missing_key;
+	}
+	irqtype = irqd_get_trigger_type(irqtype_data);
+	QIIB_DBG("%s: irqtype = %d\n", __func__, irqtype);
+
+	if (irqtype & IRQF_TRIGGER_HIGH) {
+		key = "qcom,rx-irq-clr-mask";
+		ret = of_property_read_u32(node, key, &devp->irq_mask);
+		if (ret) {
+			QIIB_ERR("%s: missing key: %s\n", __func__, key);
+			ret = -ENODEV;
+			goto missing_key;
+		}
+		QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_mask);
+
+		key = "qcom,rx-irq-clr";
+		ret = of_property_read_u32_array(node, key, irq_clear,
+							ARRAY_SIZE(irq_clear));
+		if (ret) {
+			QIIB_ERR("%s: missing key: %s\n", __func__, key);
+			ret = -ENODEV;
+			goto missing_key;
+		}
+
+		devp->rx_irq_reset_reg = ioremap_nocache(irq_clear[0],
+								irq_clear[1]);
+		if (!devp->rx_irq_reset_reg) {
+			QIIB_ERR("%s: unable to map rx reset reg\n", __func__);
+			ret = -ENOMEM;
+			goto missing_key;
+		}
+	}
+
+	devp->nb.notifier_call = qiib_restart_notifier_cb;
+	devp->notifier_handle = subsys_notif_register_notifier(devp->ssr_name,
+								&devp->nb);
+	if (IS_ERR_OR_NULL(devp->notifier_handle)) {
+		QIIB_ERR("%s: Could not register SSR notifier cb\n", __func__);
+		ret = -EINVAL;
+		goto ssr_reg_fail;
+	}
+
+	ret = request_irq(devp->irq_line, qiib_irq_handler, irqtype,
+			devp->dev_name, devp);
+	if (ret < 0) {
+		QIIB_ERR("%s: request_irq() failed on %d\n", __func__,
+				devp->irq_line);
+		goto req_irq_fail;
+	}
+
+	return ret;
+
+req_irq_fail:
+	subsys_notif_unregister_notifier(devp->notifier_handle, &devp->nb);
+ssr_reg_fail:
+	if (devp->rx_irq_reset_reg) {
+		iounmap(devp->rx_irq_reset_reg);
+		devp->rx_irq_reset_reg = NULL;
+	}
+missing_key:
+	return ret;
+}
+
+/**
+ * qiib_cleanup - cleanup all the resources
+ *
+ * This function frees all the device memory and unregisters
+ * the char device region.
+ */
+static void qiib_cleanup(void)
+{
+	struct qiib_dev *devp;
+	struct qiib_dev *index;
+
+	mutex_lock(&qiib_info->list_lock);
+	list_for_each_entry_safe(devp, index, &qiib_info->list, dev_list) {
+		cdev_del(&devp->cdev);
+		list_del(&devp->dev_list);
+		device_destroy(qiib_info->classp,
+			       MKDEV(MAJOR(qiib_info->dev_num), devp->i));
+		if (devp->notifier_handle)
+			subsys_notif_unregister_notifier(devp->notifier_handle,
+								&devp->nb);
+		kfree(devp);
+	}
+	mutex_unlock(&qiib_info->list_lock);
+
+	if (!IS_ERR_OR_NULL(qiib_info->classp))
+		class_destroy(qiib_info->classp);
+
+	unregister_chrdev_region(MAJOR(qiib_info->dev_num), qiib_info->nports);
+}
+
+/**
+ * qiib_alloc_chrdev_region() - allocate the char device region
+ *
+ * This function allocates the qiib character-device region and
+ * creates the device class.
+ */
+static int qiib_alloc_chrdev_region(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&qiib_info->dev_num,
+			       0,
+			       qiib_info->nports,
+			       DEVICE_NAME);
+	if (IS_ERR_VALUE((unsigned long)ret)) {
+		QIIB_ERR("%s: alloc_chrdev_region() failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	qiib_info->classp = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(qiib_info->classp)) {
+		QIIB_ERR("%s: class_create() failed ENOMEM\n", __func__);
+		ret = -ENOMEM;
+		unregister_chrdev_region(MAJOR(qiib_info->dev_num),
+						qiib_info->nports);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qsee_ipc_irq_bridge_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct qiib_dev *devp;
+	int i = 0;
+
+	qiib_info->nports = of_get_available_child_count(pdev->dev.of_node);
+	if (!qiib_info->nports) {
+		QIIB_ERR("%s:Fail nports = %d\n", __func__, qiib_info->nports);
+		return -EINVAL;
+	}
+
+	ret = qiib_alloc_chrdev_region();
+	if (ret) {
+		QIIB_ERR("%s: chrdev_region allocation failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	for_each_available_child_of_node(pdev->dev.of_node, node) {
+		devp = kzalloc(sizeof(*devp), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(devp)) {
+			QIIB_ERR("%s:Allocation failed id:%d\n", __func__, i);
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		ret = qiib_parse_node(node, devp);
+		if (ret) {
+			QIIB_ERR("%s:qiib_parse_node failed %d\n", __func__, i);
+			goto error;
+		}
+
+		ret = qiib_add_device(devp, i);
+		if (ret < 0) {
+			QIIB_ERR("%s: add [%s] device failed ret=%d\n",
+					__func__, devp->dev_name, ret);
+			goto error;
+		}
+
+		ret = qiib_init_notifs(node, devp);
+		if (ret < 0) {
+			QIIB_ERR("%s: qiib_init_notifs failed ret=%d\n",
+					__func__, ret);
+			goto error;
+		}
+
+		i++;
+	}
+
+	QIIB_DBG("%s: Driver Initialized.\n", __func__);
+	return 0;
+
+error:
+	qiib_cleanup();
+	return ret;
+}
+
+static int qsee_ipc_irq_bridge_remove(struct platform_device *pdev)
+{
+	qiib_cleanup();
+	return 0;
+}
+
+static const struct of_device_id qsee_ipc_irq_bridge_match_table[] = {
+	{ .compatible = "qcom,qsee-ipc-irq-bridge" },
+	{},
+};
+
+static struct platform_driver qsee_ipc_irq_bridge_driver = {
+	.probe = qsee_ipc_irq_bridge_probe,
+	.remove = qsee_ipc_irq_bridge_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.of_match_table = qsee_ipc_irq_bridge_match_table,
+	 },
+};
+
+static int __init qsee_ipc_irq_bridge_init(void)
+{
+	int ret;
+
+	ret = qiib_driver_data_init();
+	if (ret) {
+		QIIB_ERR("%s: driver data init failed %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	ret = platform_driver_register(&qsee_ipc_irq_bridge_driver);
+	if (ret) {
+		QIIB_ERR("%s: platform driver register failed %d\n",
+			__func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(qsee_ipc_irq_bridge_init);
+
+static void __exit qsee_ipc_irq_bridge_exit(void)
+{
+	platform_driver_unregister(&qsee_ipc_irq_bridge_driver);
+	qiib_driver_data_deinit();
+}
+module_exit(qsee_ipc_irq_bridge_exit);
+MODULE_DESCRIPTION("QSEE IPC interrupt bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
index 8244ba0..dfc3e91 100644
--- a/drivers/soc/qcom/rpmh-internal.h
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -28,7 +28,6 @@
  * @offset:    start of the TCS group relative to the TCSes in the RSC
  * @num_tcs:   number of TCSes in this type
  * @ncpt:      number of commands in each TCS
- * @lock:      lock for synchronizing this TCS writes
  * @req:       requests that are sent from the TCS
  * @cmd_cache: flattened cache of cmds in sleep/wake TCS
  * @slots:     indicates which of @cmd_addr are occupied
@@ -40,7 +39,6 @@
 	u32 offset;
 	int num_tcs;
 	int ncpt;
-	spinlock_t lock;
 	const struct tcs_request *req[MAX_TCS_PER_TYPE];
 	u32 *cmd_cache;
 	DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
@@ -90,6 +88,7 @@
  * @base:       start address of the RSC's DRV registers
  * @tcs_base:   start address of the TCS registers in this controller
  * @id:         instance id in the controller (Direct Resource Voter)
+ * @in_solver_mode: Controller is in solver mode
  * @num_tcs:    number of TCSes in this DRV
  * @tcs:        TCS groups
  * @tcs_in_use: s/w state of the TCS
@@ -102,6 +101,7 @@
 	void __iomem *base;
 	void __iomem *tcs_base;
 	int id;
+	bool in_solver_mode;
 	int num_tcs;
 	struct tcs_group tcs[TCS_TYPE_NR];
 	DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
@@ -118,6 +118,7 @@
 int rpmh_rsc_invalidate(struct rsc_drv *drv);
 bool rpmh_rsc_ctrlr_is_idle(struct rsc_drv *drv);
 int rpmh_rsc_write_pdc_data(struct rsc_drv *drv, const struct tcs_request *msg);
+void rpmh_rsc_mode_solver_set(struct rsc_drv *drv, bool enable);
 
 void rpmh_tx_done(const struct tcs_request *msg, int r);
 
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index 7f3be85..533e2e2 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -100,8 +100,7 @@
 
 static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
 {
-	return !test_bit(tcs_id, drv->tcs_in_use) &&
-	       read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
+	return !test_bit(tcs_id, drv->tcs_in_use);
 }
 
 static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
@@ -111,29 +110,28 @@
 
 static int tcs_invalidate(struct rsc_drv *drv, int type)
 {
-	int m;
+	int m, ret = 0;
 	struct tcs_group *tcs;
 
 	tcs = get_tcs_of_type(drv, type);
 
-	spin_lock(&tcs->lock);
-	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
-		spin_unlock(&tcs->lock);
-		return 0;
-	}
+	spin_lock(&drv->lock);
+	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
+		goto done;
 
 	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
 		if (!tcs_is_free(drv, m)) {
-			spin_unlock(&tcs->lock);
-			return -EAGAIN;
+			ret = -EAGAIN;
+			goto done;
 		}
 		write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
 		write_tcs_reg_sync(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, m, 0);
 	}
 	bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
-	spin_unlock(&tcs->lock);
 
-	return 0;
+done:
+	spin_unlock(&drv->lock);
+	return ret;
 }
 
 /**
@@ -300,9 +298,7 @@
 		write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
 		write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, i, 0);
 		write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
-		spin_lock(&drv->lock);
 		clear_bit(i, drv->tcs_in_use);
-		spin_unlock(&drv->lock);
 		if (req)
 			rpmh_tx_done(req, err);
 	}
@@ -385,29 +381,29 @@
 {
 	struct tcs_group *tcs;
 	int tcs_id;
-	unsigned long flags;
 	int ret;
 
 	tcs = get_tcs_for_msg(drv, msg);
 	if (IS_ERR(tcs))
 		return PTR_ERR(tcs);
 
-	spin_lock_irqsave(&tcs->lock, flags);
 	spin_lock(&drv->lock);
+	if (msg->state == RPMH_ACTIVE_ONLY_STATE && drv->in_solver_mode) {
+		ret = -EINVAL;
+		goto done_write;
+	}
 	/*
 	 * The h/w does not like if we send a request to the same address,
 	 * when one is already in-flight or being processed.
 	 */
 	ret = check_for_req_inflight(drv, tcs, msg);
 	if (ret) {
-		spin_unlock(&drv->lock);
 		goto done_write;
 	}
 
 	tcs_id = find_free_tcs(tcs);
 	if (tcs_id < 0) {
 		ret = tcs_id;
-		spin_unlock(&drv->lock);
 		goto done_write;
 	}
 
@@ -415,13 +411,12 @@
 	set_bit(tcs_id, drv->tcs_in_use);
 	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS)
 		enable_tcs_irq(drv, tcs_id, true);
-	spin_unlock(&drv->lock);
 
 	__tcs_buffer_write(drv, tcs_id, 0, msg);
 	__tcs_trigger(drv, tcs_id, true);
 
 done_write:
-	spin_unlock_irqrestore(&tcs->lock, flags);
+	spin_unlock(&drv->lock);
 	return ret;
 }
 
@@ -519,24 +514,47 @@
 {
 	struct tcs_group *tcs;
 	int tcs_id = 0, cmd_id = 0;
-	unsigned long flags;
 	int ret;
 
 	tcs = get_tcs_for_msg(drv, msg);
 	if (IS_ERR(tcs))
 		return PTR_ERR(tcs);
 
-	spin_lock_irqsave(&tcs->lock, flags);
+	spin_lock(&drv->lock);
 	/* find the TCS id and the command in the TCS to write to */
 	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
 	if (!ret)
 		__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
-	spin_unlock_irqrestore(&tcs->lock, flags);
+	spin_unlock(&drv->lock);
 
 	return ret;
 }
 
 /**
+ *  rpmh_rsc_mode_solver_set: Enable/disable solver mode
+ *
+ *  @drv: The controller
+ *  @enable: state to be set - true/false
+ */
+void rpmh_rsc_mode_solver_set(struct rsc_drv *drv, bool enable)
+{
+	int m;
+	struct tcs_group *tcs = get_tcs_of_type(drv, ACTIVE_TCS);
+
+again:
+	spin_lock(&drv->lock);
+	for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
+		if (!tcs_is_free(drv, m)) {
+			spin_unlock(&drv->lock);
+			goto again;
+		}
+	}
+	drv->in_solver_mode = enable;
+	spin_unlock(&drv->lock);
+}
+
+/**
  *  rpmh_rsc_ctrlr_is_idle: Check if any of the AMCs are busy.
  *
  *  @drv: The controller
@@ -760,7 +778,6 @@
 		tcs->type = tcs_cfg[i].type;
 		tcs->num_tcs = tcs_cfg[i].n;
 		tcs->ncpt = ncpt;
-		spin_lock_init(&tcs->lock);
 
 		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
 			continue;
@@ -829,6 +846,7 @@
 		return ret;
 
 	spin_lock_init(&drv->lock);
+	drv->in_solver_mode = false;
 	bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
 
 	irq = platform_get_irq(pdev, drv->id);
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index 4e3bb21..9573aa13 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -78,14 +78,13 @@
 
 static int check_ctrlr_state(struct rpmh_ctrlr *ctrlr, enum rpmh_state state)
 {
-	unsigned long flags;
 	int ret = 0;
 
 	/* Do not allow setting active votes when in solver mode */
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	spin_lock(&ctrlr->cache_lock);
 	if (ctrlr->in_solver_mode && state == RPMH_ACTIVE_ONLY_STATE)
 		ret = -EBUSY;
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+	spin_unlock(&ctrlr->cache_lock);
 
 	return ret;
 }
@@ -103,18 +102,11 @@
 int rpmh_mode_solver_set(const struct device *dev, bool enable)
 {
 	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
-	unsigned long flags;
 
-	for (;;) {
-		spin_lock_irqsave(&ctrlr->cache_lock, flags);
-		if (rpmh_rsc_ctrlr_is_idle(ctrlr_to_drv(ctrlr))) {
-			ctrlr->in_solver_mode = enable;
-			spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-			break;
-		}
-		spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
-		udelay(10);
-	}
+	spin_lock(&ctrlr->cache_lock);
+	rpmh_rsc_mode_solver_set(ctrlr_to_drv(ctrlr), enable);
+	ctrlr->in_solver_mode = enable;
+	spin_unlock(&ctrlr->cache_lock);
 
 	return 0;
 }
@@ -162,9 +154,8 @@
 					   struct tcs_cmd *cmd)
 {
 	struct cache_req *req;
-	unsigned long flags;
 
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	spin_lock(&ctrlr->cache_lock);
 	req = __find_req(ctrlr, cmd->addr);
 	if (req)
 		goto existing;
@@ -198,7 +189,7 @@
 
 	ctrlr->dirty = true;
 unlock:
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+	spin_unlock(&ctrlr->cache_lock);
 
 	return req;
 }
@@ -237,9 +228,8 @@
 		WARN_ON(irqs_disabled());
 		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
 	} else {
-		ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
-				&rpm_msg->msg);
 		/* Clean up our call by spoofing tx_done */
+		ret = 0;
 		rpmh_tx_done(&rpm_msg->msg, ret);
 	}
 
@@ -348,23 +338,21 @@
 
 static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
 {
-	unsigned long flags;
 
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	spin_lock(&ctrlr->cache_lock);
 	list_add_tail(&req->list, &ctrlr->batch_cache);
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+	spin_unlock(&ctrlr->cache_lock);
 }
 
 static int flush_batch(struct rpmh_ctrlr *ctrlr)
 {
 	struct batch_cache_req *req;
 	const struct rpmh_request *rpm_msg;
-	unsigned long flags;
 	int ret = 0;
 	int i;
 
 	/* Send Sleep/Wake requests to the controller, expect no response */
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	spin_lock(&ctrlr->cache_lock);
 	list_for_each_entry(req, &ctrlr->batch_cache, list) {
 		for (i = 0; i < req->count; i++) {
 			rpm_msg = req->rpm_msgs + i;
@@ -374,7 +362,7 @@
 				break;
 		}
 	}
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+	spin_unlock(&ctrlr->cache_lock);
 
 	return ret;
 }
@@ -382,13 +370,12 @@
 static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
 {
 	struct batch_cache_req *req, *tmp;
-	unsigned long flags;
 
-	spin_lock_irqsave(&ctrlr->cache_lock, flags);
+	spin_lock(&ctrlr->cache_lock);
 	list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
 		kfree(req);
 	INIT_LIST_HEAD(&ctrlr->batch_cache);
-	spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+	spin_unlock(&ctrlr->cache_lock);
 }
 
 /**
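
With the per-TCS locks folded into drv->lock, solver-mode entry now
serializes directly against in-flight TCS writes instead of polling with
udelay(). A hedged sketch of a client sequence (header path assumed from the
qcom tree; vote contents omitted):

#include <soc/qcom/rpmh.h>

/* Illustrative only: once solver mode is set, active-only requests are
 * expected to be rejected with -EBUSY by check_ctrlr_state() until solver
 * mode is disabled again.
 */
static int enter_hw_solver(const struct device *dev)
{
	int ret;

	ret = rpmh_mode_solver_set(dev, true);
	if (ret)
		return ret;

	/* the hardware solver now owns active-state voting */
	return 0;
}
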
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 3be4b54..d92f2df8 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -350,6 +350,7 @@
 	return reg_count;
 }
 
+#if defined(CONFIG_QCOM_BUS_SCALING)
 static int of_read_bus_pdata(struct platform_device *pdev,
 			     struct pil_tz_data *d)
 {
@@ -366,6 +367,34 @@
 
 	return 0;
 }
+
+static int do_bus_scaling_request(struct pil_desc *pil, int enable)
+{
+	int rc;
+	struct pil_tz_data *d = desc_to_data(pil);
+
+	if (d->bus_client) {
+		rc = msm_bus_scale_client_update_request(d->bus_client, enable);
+		if (rc) {
+			dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+									rc);
+			return rc;
+		}
+	} else {
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+		     d->subsys_desc.name);
+	}
+	return 0;
+}
+#else
+static int of_read_bus_pdata(struct platform_device *pdev,
+			     struct pil_tz_data *d)
+{
+	return 0;
+}
+
+static int do_bus_scaling_request(struct pil_desc *pil, int enable)
+{
+	return 0;
+}
+#endif
 
 static int piltz_resc_init(struct platform_device *pdev, struct pil_tz_data *d)
 {
@@ -532,16 +561,9 @@
 	if (d->subsys_desc.no_auth)
 		return 0;
 
-	if (d->bus_client) {
-		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
-		if (rc) {
-			dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
-									rc);
-			return rc;
-		}
-	} else
-		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
-					d->subsys_desc.name);
+	rc = do_bus_scaling_request(pil, 1);
+	if (rc)
+		return rc;
 
 	rc = enable_regulators(d, pil->dev, d->proxy_regs,
 					d->proxy_reg_count, false);
@@ -572,11 +594,7 @@
 
 	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, true);
 
-	if (d->bus_client)
-		msm_bus_scale_client_update_request(d->bus_client, 0);
-	else
-		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
-					d->subsys_desc.name);
+	do_bus_scaling_request(pil, 0);
 }
 
 static int pil_init_image_trusted(struct pil_desc *pil,
@@ -705,16 +723,9 @@
 	desc.args[0] = proc = d->pas_id;
 	desc.arginfo = SCM_ARGS(1);
 
-	if (d->bus_client) {
-		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
-		if (rc) {
-			dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
-									rc);
-			return rc;
-		}
-	} else
-		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
-					d->subsys_desc.name);
+	rc = do_bus_scaling_request(pil, 1);
+	if (rc)
+		return rc;
 
 	rc = enable_regulators(d, pil->dev, d->proxy_regs,
 					d->proxy_reg_count, true);
@@ -732,11 +743,8 @@
 
 	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
 	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
-	if (d->bus_client)
-		msm_bus_scale_client_update_request(d->bus_client, 0);
-	else
-		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
-					d->subsys_desc.name);
+
+	do_bus_scaling_request(pil, 0);
 
 	if (rc)
 		return rc;
@@ -749,11 +757,8 @@
 err_clks:
 	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
 err_regulators:
-	if (d->bus_client)
-		msm_bus_scale_client_update_request(d->bus_client, 0);
-	else
-		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
-					d->subsys_desc.name);
+	do_bus_scaling_request(pil, 0);
+
 	return rc;
 }
 
diff --git a/drivers/soc/qcom/wda_qmi.c b/drivers/soc/qcom/wda_qmi.c
new file mode 100644
index 0000000..f3f2d19
--- /dev/null
+++ b/drivers/soc/qcom/wda_qmi.c
@@ -0,0 +1,429 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/soc/qcom/qmi.h>
+#include <soc/qcom/rmnet_qmi.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/wda.h>
+#include "qmi_rmnet_i.h"
+
+struct wda_qmi_data {
+	void *rmnet_port;
+	struct workqueue_struct *wda_wq;
+	struct work_struct svc_arrive;
+	struct qmi_handle handle;
+	struct sockaddr_qrtr ssctl;
+};
+
+static void wda_svc_config(struct work_struct *work);
+/* **************************************************** */
+#define WDA_SERVICE_ID_V01 0x1A
+#define WDA_SERVICE_VERS_V01 0x01
+#define WDA_TIMEOUT_MS  20
+
+#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
+#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
+#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN 18
+#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN 14
+
+#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01 0x002E
+#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01 0x002E
+#define QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN 4
+#define QMI_WDA_SET_POWERSAVE_MODE_RESP_V01_MAX_MSG_LEN 7
+
+enum wda_powersave_config_mask_enum_v01 {
+	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	WDA_DATA_POWERSAVE_CONFIG_NOT_SUPPORTED = 0x00,
+	WDA_DATA_POWERSAVE_CONFIG_DL_MARKER_V01 = 0x01,
+	WDA_DATA_POWERSAVE_CONFIG_FLOW_CTL_V01 = 0x02,
+	WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01 = 0x7FFFFFFF,
+	WDA_DATA_POWERSAVE_CONFIG_MASK_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct wda_set_powersave_config_req_msg_v01 {
+	/* Mandatory */
+	struct data_ep_id_type_v01 ep_id;
+	/* Optional */
+	uint8_t req_data_cfg_valid;
+	enum wda_powersave_config_mask_enum_v01 req_data_cfg;
+};
+
+struct wda_set_powersave_config_resp_msg_v01 {
+	/* Mandatory */
+	struct qmi_response_type_v01 resp;
+	/* Optional */
+	uint8_t data_cfg_valid;
+	enum wda_powersave_config_mask_enum_v01 data_cfg;
+};
+
+struct wda_set_powersave_mode_req_msg_v01 {
+	/* Mandatory */
+	uint8_t powersave_control_flag;
+};
+
+struct wda_set_powersave_mode_resp_msg_v01 {
+	/* Mandatory */
+	struct qmi_response_type_v01 resp;
+};
+
+static struct qmi_elem_info wda_set_powersave_config_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct data_ep_id_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(struct
+				wda_set_powersave_config_req_msg_v01,
+				ep_id),
+		.ei_array	= data_ep_id_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+				wda_set_powersave_config_req_msg_v01,
+				req_data_cfg_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum
+					 wda_powersave_config_mask_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+				wda_set_powersave_config_req_msg_v01,
+				req_data_cfg),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info wda_set_powersave_config_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					wda_set_powersave_config_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					wda_set_powersave_config_resp_msg_v01,
+					   data_cfg_valid),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(enum
+					 wda_powersave_config_mask_enum_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(struct
+					wda_set_powersave_config_resp_msg_v01,
+					   data_cfg),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info wda_set_powersave_mode_req_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_req_msg_v01,
+					   powersave_control_flag),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct qmi_elem_info wda_set_powersave_mode_resp_msg_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.array_type	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+					   wda_set_powersave_mode_resp_msg_v01,
+					   resp),
+		.ei_array	= qmi_response_type_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.array_type	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
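+/*
+ * Note on the qmi_elem_info tables above: each entry maps one struct field
+ * to a QMI TLV. Mandatory TLVs use fixed tlv_type values (0x01, 0x02);
+ * optional TLVs start at 0x10 and pair a *_valid flag (QMI_OPT_FLAG) with a
+ * payload entry of the same tlv_type, so the encoder emits the TLV only when
+ * the flag is set. QMI_EOTI terminates each table.
+ */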
+static int wda_set_powersave_mode_req(void *wda_data, uint8_t enable)
+{
+	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
+	struct wda_set_powersave_mode_resp_msg_v01 *resp;
+	struct wda_set_powersave_mode_req_msg_v01  *req;
+	struct qmi_txn txn;
+	int ret;
+
+	if (!data || !data->rmnet_port)
+		return -EINVAL;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(&data->handle, &txn,
+			   wda_set_powersave_mode_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->powersave_control_flag = enable;
+	ret = qmi_send_request(&data->handle, &data->ssctl, &txn,
+			QMI_WDA_SET_POWERSAVE_MODE_REQ_V01,
+			QMI_WDA_SET_POWERSAVE_MODE_REQ_V01_MAX_MSG_LEN,
+			wda_set_powersave_mode_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, err: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
+					struct qmi_info *qmi)
+{
+	struct wda_qmi_data *data = container_of(wda_handle,
+						 struct wda_qmi_data, handle);
+	struct wda_set_powersave_config_resp_msg_v01 *resp;
+	struct wda_set_powersave_config_req_msg_v01  *req;
+	struct qmi_txn txn;
+	int ret;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	ret = qmi_txn_init(wda_handle, &txn,
+			   wda_set_powersave_config_resp_msg_v01_ei, resp);
+	if (ret < 0) {
+		pr_err("%s() Failed init for response, err: %d\n",
+			__func__, ret);
+		goto out;
+	}
+
+	req->ep_id.ep_type = qmi->fc_info[0].svc.ep_type;
+	req->ep_id.iface_id = qmi->fc_info[0].svc.iface_id;
+	req->req_data_cfg_valid = 1;
+	req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
+	ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
+			QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01,
+			QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01_MAX_MSG_LEN,
+			wda_set_powersave_config_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		pr_err("%s() Failed sending request, err: %d\n", __func__, ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+	if (ret < 0) {
+		pr_err("%s() Response waiting failed, err: %d\n",
+			__func__, ret);
+	} else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		pr_err("%s() Request rejected, result: %d, error: %d\n",
+			__func__, resp->resp.result, resp->resp.error);
+		ret = -resp->resp.result;
+	}
+
+out:
+	kfree(resp);
+	kfree(req);
+	return ret;
+}
+
+static void wda_svc_config(struct work_struct *work)
+{
+	struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
+						 svc_arrive);
+	struct qmi_info *qmi;
+
+	qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
+	if (!qmi)
+		goto clean_out;
+
+	if (wda_set_powersave_config_req(&data->handle, qmi) < 0) {
+		pr_err("%s() failed, qmi handle pt: %p\n",
+			__func__, &data->handle);
+		goto clean_out;
+	}
+
+	trace_wda_client_state_up(qmi->fc_info[0].svc.instance,
+				  qmi->fc_info[0].svc.ep_type,
+				  qmi->fc_info[0].svc.iface_id);
+	qmi->wda_client = (void *)data;
+	pr_info("Connection established with the WDA Service\n");
+	return;
+
+clean_out:
+	qmi_handle_release(&data->handle);
+	destroy_workqueue(data->wda_wq);
+	kfree(data);
+}
+
+static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
+						 handle);
+
+	data->ssctl.sq_family = AF_QIPCRTR;
+	data->ssctl.sq_node = svc->node;
+	data->ssctl.sq_port = svc->port;
+
+	queue_work(data->wda_wq, &data->svc_arrive);
+
+	return 0;
+}
+
+static void wda_svc_exit(struct qmi_handle *qmi, struct qmi_service *svc)
+{
+	struct wda_qmi_data *data = container_of(qmi, struct wda_qmi_data,
+						 handle);
+
+	if (!data)
+		pr_info("%s() data is null\n", __func__);
+}
+
+static struct qmi_ops server_ops = {
+	.new_server = wda_svc_arrive,
+	.del_server = wda_svc_exit,
+};
+
+int wda_qmi_client_init(void *port, uint32_t instance)
+{
+	struct wda_qmi_data *data;
+	int rc = 0;
+
+	if (!port)
+		return -EINVAL;
+
+	pr_info("%s\n", __func__);
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->wda_wq = create_singlethread_workqueue("wda_wq");
+	if (!data->wda_wq) {
+		pr_err("%s Could not create workqueue\n", __func__);
+		kfree(data);
+		return -ENOMEM;
+	}
+
+	data->rmnet_port = port;
+	INIT_WORK(&data->svc_arrive, wda_svc_config);
+
+	rc = qmi_handle_init(&data->handle,
+			     QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01_MAX_MSG_LEN,
+			     &server_ops, NULL);
+	if (rc < 0) {
+		pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
+		kfree(data);
+		return rc;
+	}
+
+	rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
+			    WDA_SERVICE_VERS_V01, instance);
+	if (rc < 0) {
+		pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
+		qmi_handle_release(&data->handle);
+		destroy_workqueue(data->wda_wq);
+		kfree(data);
+	}
+
+	return rc;
+}
+
+void wda_qmi_client_exit(void *wda_data)
+{
+	struct wda_qmi_data *data = (struct wda_qmi_data *)wda_data;
+
+	if (!data) {
+		pr_info("%s() data is null\n", __func__);
+		return;
+	}
+
+	trace_wda_client_state_down(0);
+	qmi_handle_release(&data->handle);
+	destroy_workqueue(data->wda_wq);
+	kfree(data);
+}
+
+int wda_set_powersave_mode(void *wda_data, uint8_t enable)
+{
+	trace_wda_set_powersave_mode(enable);
+	return wda_set_powersave_mode_req(wda_data, enable);
+}
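
For context, a hedged sketch of how the rmnet core side might drive this API
once wda_svc_config() has populated qmi->wda_client (the function name is
hypothetical; error handling trimmed):

/* Toggle modem powersave through the WDA client, if connected. */
static int rmnet_enable_powersave(struct qmi_info *qmi, bool on)
{
	if (!qmi || !qmi->wda_client)
		return -EINVAL;	/* WDA service not yet up */

	return wda_set_powersave_mode(qmi->wda_client, on ? 1 : 0);
}
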
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 987fc5b..953dff4 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -173,6 +173,8 @@
 
 source "drivers/usb/roles/Kconfig"
 
+source "drivers/usb/pd/Kconfig"
+
 config USB_LED_TRIG
 	bool "USB LED Triggers"
 	depends on LEDS_CLASS && LEDS_TRIGGERS
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 7d1b8c8..0dc6119 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -67,3 +67,5 @@
 obj-$(CONFIG_TYPEC)		+= typec/
 
 obj-$(CONFIG_USB_ROLE_SWITCH)	+= roles/
+
+obj-$(CONFIG_USB_PD)		+= pd/
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index e60a4d8..a1a3ce7 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -35,6 +35,8 @@
 static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep);
 static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
 		struct dwc3_ep *dep, struct dwc3_request *req);
+static int dwc3_ep0_delegate_req(struct dwc3 *dwc,
+		struct usb_ctrlrequest *ctrl);
 
 static void dwc3_ep0_prepare_one_trb(struct dwc3_ep *dep,
 		dma_addr_t buf_dma, u32 len, u32 type, bool chain)
@@ -322,12 +324,14 @@
 static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
 {
 }
+
 /*
  * ch 9.4.5
  */
 static int dwc3_ep0_handle_status(struct dwc3 *dwc,
 		struct usb_ctrlrequest *ctrl)
 {
+	int ret;
 	struct dwc3_ep		*dep;
 	u32			recip;
 	u32			value;
@@ -367,7 +371,8 @@
 		 * Function Remote Wake Capable	D0
 		 * Function Remote Wakeup	D1
 		 */
-		break;
+		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+		return ret;
 
 	case USB_RECIP_ENDPOINT:
 		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
@@ -524,6 +529,9 @@
 		 * For now, we're not doing anything, just making sure we return
 		 * 0 so USB Command Verifier tests pass without any errors.
 		 */
+		ret = dwc3_ep0_delegate_req(dwc, ctrl);
+		if (ret)
+			return ret;
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 1074cb8..6aada1c 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2222,6 +2222,8 @@
 			fsg->bulk_out_enabled = 0;
 		}
 
+		/* allow usb LPM after eps are disabled */
+		usb_gadget_autopm_put_async(common->gadget);
 		common->fsg = NULL;
 		wake_up(&common->fsg_wait);
 	}
@@ -2286,6 +2288,10 @@
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
 	fsg->common->new_fsg = fsg;
+
+	/* prevents usb LPM until thread runs to completion */
+	usb_gadget_autopm_get_async(fsg->common->gadget);
+
 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
 	return USB_GADGET_DELAYED_STATUS;
 }
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index c2dfc6f..59d0bd1 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -1029,7 +1029,7 @@
 	rndis->port.func.disable = rndis_disable;
 	rndis->port.func.free_func = rndis_free;
 
-	params = rndis_register(rndis_response_available, rndis);
+	params = rndis_register(rndis_response_available, rndis, NULL);
 	if (IS_ERR(params)) {
 		kfree(rndis);
 		return ERR_CAST(params);
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 57a152f..ad86ed5 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -579,7 +579,7 @@
 		+ sizeof(struct ethhdr)
 		+ sizeof(struct rndis_packet_msg_type)
 		+ 22));
-	resp->PacketAlignmentFactor = cpu_to_le32(0);
+	resp->PacketAlignmentFactor = cpu_to_le32(params->pkt_alignment_factor);
 	resp->AFListOffset = cpu_to_le32(0);
 	resp->AFListSize = cpu_to_le32(0);
 
@@ -806,18 +806,33 @@
 	/* For USB: responses may take up to 10 seconds */
 	switch (MsgType) {
 	case RNDIS_MSG_INIT:
-		pr_debug("%s: RNDIS_MSG_INIT\n",
-			__func__);
+		pr_debug("%s: RNDIS_MSG_INIT\n", __func__);
+		tmp++; /* to get RequestID */
+		params->host_rndis_major_ver = get_unaligned_le32(tmp++);
+		params->host_rndis_minor_ver = get_unaligned_le32(tmp++);
+		params->dl_max_xfer_size = get_unaligned_le32(tmp++);
+
+		pr_debug("%s(): RNDIS Host Major:%d Minor:%d version\n",
+					__func__, params->host_rndis_major_ver,
+					params->host_rndis_minor_ver);
+		pr_debug("%s(): DL Max Transfer size:%x\n",
+				__func__, params->dl_max_xfer_size);
 		params->state = RNDIS_INITIALIZED;
 		return rndis_init_response(params, (rndis_init_msg_type *)buf);
 
 	case RNDIS_MSG_HALT:
 		pr_debug("%s: RNDIS_MSG_HALT\n",
 			__func__);
-		params->state = RNDIS_UNINITIALIZED;
-		if (params->dev) {
-			netif_carrier_off(params->dev);
-			netif_stop_queue(params->dev);
+		if (params->state == RNDIS_DATA_INITIALIZED) {
+			if (params->flow_ctrl_enable) {
+				params->flow_ctrl_enable(true, params);
+			} else {
+				if (params->dev) {
+					netif_carrier_off(params->dev);
+					netif_stop_queue(params->dev);
+				}
+			}
+			params->state = RNDIS_UNINITIALIZED;
 		}
 		return 0;
 
@@ -872,7 +887,8 @@
 	ida_simple_remove(&rndis_ida, nr);
 }
 
-struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
+struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v,
+	void (*flow_ctrl_enable)(bool enable, struct rndis_params *params))
 {
 	struct rndis_params *params;
 	int i;
@@ -916,6 +932,7 @@
 	params->state = RNDIS_UNINITIALIZED;
 	params->media_state = RNDIS_MEDIA_STATE_DISCONNECTED;
 	params->resp_avail = resp_avail;
+	params->flow_ctrl_enable = flow_ctrl_enable;
 	params->v = v;
 	INIT_LIST_HEAD(&params->resp_queue);
 	pr_debug("%s: configNr = %d\n", __func__, i);
@@ -999,6 +1016,49 @@
 	params->max_pkt_per_xfer = max_pkt_per_xfer;
 }
 
+/**
+ * rndis_flow_control - enable/disable flow control on the RNDIS interface
+ * @params: RNDIS network parameters
+ * @enable_flow_control: true to throttle the data path, false to resume it
+ *
+ * In hw-accelerated mode this asks the function driver to stop/start its
+ * endless transfers; otherwise it stops/restarts the RNDIS network interface.
+ */
+void rndis_flow_control(struct rndis_params *params, bool enable_flow_control)
+{
+	if (!params) {
+		pr_err("%s: failed, params NULL\n", __func__);
+		return;
+	}
+
+	pr_debug("%s(): params->state:%x\n", __func__, params->state);
+
+	if (enable_flow_control) {
+		if (params->state == RNDIS_DATA_INITIALIZED) {
+			if (params->flow_ctrl_enable) {
+				params->flow_ctrl_enable(enable_flow_control,
+								params);
+			} else {
+				netif_carrier_off(params->dev);
+				netif_stop_queue(params->dev);
+			}
+		}
+		params->state = RNDIS_INITIALIZED;
+	} else {
+		if (params->state != RNDIS_DATA_INITIALIZED) {
+			if (params->flow_ctrl_enable) {
+				params->flow_ctrl_enable(enable_flow_control,
+								params);
+			} else {
+				netif_carrier_on(params->dev);
+				if (netif_running(params->dev))
+					netif_wake_queue(params->dev);
+			}
+		}
+		params->state = RNDIS_DATA_INITIALIZED;
+	}
+}
+
 void rndis_add_hdr(struct sk_buff *skb)
 {
 	struct rndis_packet_msg_type *header;
@@ -1128,6 +1188,19 @@
 }
 EXPORT_SYMBOL_GPL(rndis_rm_hdr);
 
+void rndis_set_pkt_alignment_factor(struct rndis_params *params,
+		u8 pkt_alignment_factor)
+{
+	pr_debug("%s:\n", __func__);
+
+	if (!params) {
+		pr_err("%s: failed, params NULL\n", __func__);
+		return;
+	}
+
+	params->pkt_alignment_factor = pkt_alignment_factor;
+}
+
 #ifdef CONFIG_USB_GADGET_DEBUG_FILES
 
 static int rndis_proc_show(struct seq_file *m, void *v)
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index 4bd6dc9..de61b7b 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -171,15 +171,23 @@
 
 	u32			vendorID;
 	u8			max_pkt_per_xfer;
+	u32			host_rndis_major_ver;
+	u32			host_rndis_minor_ver;
+	u32			dl_max_xfer_size;
 	const char		*vendorDescr;
+	u8			pkt_alignment_factor;
 	void			(*resp_avail)(void *v);
+	void			(*flow_ctrl_enable)(bool enable,
+			struct rndis_params *params);
+
 	void			*v;
 	struct list_head	resp_queue;
 } rndis_params;
 
 /* RNDIS Message parser and other useless functions */
 int  rndis_msg_parser(struct rndis_params *params, u8 *buf);
-struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v);
+struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v,
+	void (*flow_ctrl_enable)(bool enable, struct rndis_params *params));
 void rndis_deregister(struct rndis_params *params);
 int  rndis_set_param_dev(struct rndis_params *params, struct net_device *dev,
 			 u16 *cdc_filter);
@@ -199,5 +207,8 @@
 int  rndis_signal_disconnect(struct rndis_params *params);
 int  rndis_state(struct rndis_params *params);
 extern void rndis_set_host_mac(struct rndis_params *params, const u8 *addr);
+void rndis_flow_control(struct rndis_params *params, bool enable_flow_control);
+void rndis_set_pkt_alignment_factor(struct rndis_params *params,
+		u8 pkt_alignment_factor);
 
 #endif  /* _LINUX_RNDIS_H */
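
A function driver owning a hardware-accelerated data path registers a
callback through the extended rndis_register() instead of passing NULL as
f_rndis.c does above. A sketch (the two helpers are hypothetical; params->v
is the context pointer passed at registration):

static void my_rndis_flow_ctrl(bool enable, struct rndis_params *params)
{
	struct f_rndis *rndis = params->v;	/* context from rndis_register() */

	if (enable)
		stop_hw_endless_transfers(rndis);	/* hypothetical helper */
	else
		start_hw_endless_transfers(rndis);	/* hypothetical helper */
}

Registration then becomes
rndis_register(rndis_response_available, rndis, my_rndis_flow_ctrl).
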
diff --git a/drivers/usb/pd/Kconfig b/drivers/usb/pd/Kconfig
new file mode 100644
index 0000000..10e9efe
--- /dev/null
+++ b/drivers/usb/pd/Kconfig
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# USB Power Delivery driver configuration
+#
+menu "USB Power Delivery"
+
+config USB_PD
+	def_bool n
+
+config USB_PD_POLICY
+	tristate "USB Power Delivery Protocol and Policy Engine"
+	depends on EXTCON
+	depends on DUAL_ROLE_USB_INTF
+	select USB_PD
+	help
+	  Say Y here to enable USB PD protocol and policy engine.
+	  This driver provides a class that implements the upper
+	  layers of the USB Power Delivery stack. It requires a
+	  PD PHY driver in order to transmit and receive PD
+	  messages on its behalf.
+
+config QPNP_USB_PDPHY
+	tristate "QPNP USB Power Delivery PHY"
+	depends on SPMI
+	help
+	  Say Y here to enable the QPNP USB PD PHY peripheral driver,
+	  which communicates over the SPMI bus.
+	  It is used to handle the PHY layer communication of the
+	  Power Delivery stack.
+
+endmenu
diff --git a/drivers/usb/pd/Makefile b/drivers/usb/pd/Makefile
new file mode 100644
index 0000000..27bd359
--- /dev/null
+++ b/drivers/usb/pd/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for USB Power Delivery drivers
+#
+
+obj-$(CONFIG_USB_PD_POLICY)	+= policy_engine.o
+obj-$(CONFIG_QPNP_USB_PDPHY)	+= qpnp-pdphy.o
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
new file mode 100644
index 0000000..591f65a
--- /dev/null
+++ b/drivers/usb/pd/policy_engine.c
@@ -0,0 +1,4475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/hrtimer.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/extcon.h>
+#include <linux/usb/class-dual-role.h>
+#include <linux/usb/usbpd.h>
+#include "usbpd.h"
+
+enum usbpd_state {
+	PE_UNKNOWN,
+	PE_ERROR_RECOVERY,
+	PE_SRC_DISABLED,
+	PE_SRC_STARTUP,
+	PE_SRC_STARTUP_WAIT_FOR_VDM_RESP,
+	PE_SRC_SEND_CAPABILITIES,
+	PE_SRC_SEND_CAPABILITIES_WAIT, /* substate to wait for Request */
+	PE_SRC_NEGOTIATE_CAPABILITY,
+	PE_SRC_TRANSITION_SUPPLY,
+	PE_SRC_READY,
+	PE_SRC_HARD_RESET,
+	PE_SRC_SOFT_RESET,
+	PE_SRC_DISCOVERY,
+	PE_SRC_TRANSITION_TO_DEFAULT,
+	PE_SNK_STARTUP,
+	PE_SNK_DISCOVERY,
+	PE_SNK_WAIT_FOR_CAPABILITIES,
+	PE_SNK_EVALUATE_CAPABILITY,
+	PE_SNK_SELECT_CAPABILITY,
+	PE_SNK_TRANSITION_SINK,
+	PE_SNK_READY,
+	PE_SNK_HARD_RESET,
+	PE_SNK_SOFT_RESET,
+	PE_SNK_TRANSITION_TO_DEFAULT,
+	PE_DRS_SEND_DR_SWAP,
+	PE_PRS_SNK_SRC_SEND_SWAP,
+	PE_PRS_SNK_SRC_TRANSITION_TO_OFF,
+	PE_PRS_SNK_SRC_SOURCE_ON,
+	PE_PRS_SRC_SNK_SEND_SWAP,
+	PE_PRS_SRC_SNK_TRANSITION_TO_OFF,
+	PE_PRS_SRC_SNK_WAIT_SOURCE_ON,
+	PE_SEND_SOFT_RESET,
+	PE_VCS_WAIT_FOR_VCONN,
+	PE_MAX_STATES,
+};
+
+static const char * const usbpd_state_strings[] = {
+	"UNKNOWN",
+	"ERROR_RECOVERY",
+	"SRC_Disabled",
+	"SRC_Startup",
+	"SRC_Startup_Wait_for_VDM_Resp",
+	"SRC_Send_Capabilities",
+	"SRC_Send_Capabilities (Wait for Request)",
+	"SRC_Negotiate_Capability",
+	"SRC_Transition_Supply",
+	"SRC_Ready",
+	"SRC_Hard_Reset",
+	"SRC_Soft_Reset",
+	"SRC_Discovery",
+	"SRC_Transition_to_default",
+	"SNK_Startup",
+	"SNK_Discovery",
+	"SNK_Wait_for_Capabilities",
+	"SNK_Evaluate_Capability",
+	"SNK_Select_Capability",
+	"SNK_Transition_Sink",
+	"SNK_Ready",
+	"SNK_Hard_Reset",
+	"SNK_Soft_Reset",
+	"SNK_Transition_to_default",
+	"DRS_Send_DR_Swap",
+	"PRS_SNK_SRC_Send_Swap",
+	"PRS_SNK_SRC_Transition_to_off",
+	"PRS_SNK_SRC_Source_on",
+	"PRS_SRC_SNK_Send_Swap",
+	"PRS_SRC_SNK_Transition_to_off",
+	"PRS_SRC_SNK_Wait_Source_on",
+	"Send_Soft_Reset",
+	"VCS_Wait_for_VCONN",
+};
+
+enum usbpd_control_msg_type {
+	MSG_RESERVED = 0,
+	MSG_GOODCRC,
+	MSG_GOTOMIN,
+	MSG_ACCEPT,
+	MSG_REJECT,
+	MSG_PING,
+	MSG_PS_RDY,
+	MSG_GET_SOURCE_CAP,
+	MSG_GET_SINK_CAP,
+	MSG_DR_SWAP,
+	MSG_PR_SWAP,
+	MSG_VCONN_SWAP,
+	MSG_WAIT,
+	MSG_SOFT_RESET,
+	MSG_NOT_SUPPORTED = 0x10,
+	MSG_GET_SOURCE_CAP_EXTENDED,
+	MSG_GET_STATUS,
+	MSG_FR_SWAP,
+	MSG_GET_PPS_STATUS,
+	MSG_GET_COUNTRY_CODES,
+};
+
+static const char * const usbpd_control_msg_strings[] = {
+	"", "GoodCRC", "GotoMin", "Accept", "Reject", "Ping", "PS_RDY",
+	"Get_Source_Cap", "Get_Sink_Cap", "DR_Swap", "PR_Swap", "VCONN_Swap",
+	"Wait", "Soft_Reset", "", "", "Not_Supported",
+	"Get_Source_Cap_Extended", "Get_Status", "FR_Swap", "Get_PPS_Status",
+	"Get_Country_Codes",
+};
+
+enum usbpd_data_msg_type {
+	MSG_SOURCE_CAPABILITIES = 1,
+	MSG_REQUEST,
+	MSG_BIST,
+	MSG_SINK_CAPABILITIES,
+	MSG_BATTERY_STATUS,
+	MSG_ALERT,
+	MSG_GET_COUNTRY_INFO,
+	MSG_VDM = 0xF,
+};
+
+static const char * const usbpd_data_msg_strings[] = {
+	"", "Source_Capabilities", "Request", "BIST", "Sink_Capabilities",
+	"Battery_Status", "Alert", "Get_Country_Info", "", "", "", "", "", "",
+	"", "Vendor_Defined",
+};
+
+enum usbpd_ext_msg_type {
+	MSG_SOURCE_CAPABILITIES_EXTENDED = 1,
+	MSG_STATUS,
+	MSG_GET_BATTERY_CAP,
+	MSG_GET_BATTERY_STATUS,
+	MSG_BATTERY_CAPABILITIES,
+	MSG_GET_MANUFACTURER_INFO,
+	MSG_MANUFACTURER_INFO,
+	MSG_SECURITY_REQUEST,
+	MSG_SECURITY_RESPONSE,
+	MSG_FIRMWARE_UPDATE_REQUEST,
+	MSG_FIRMWARE_UPDATE_RESPONSE,
+	MSG_PPS_STATUS,
+	MSG_COUNTRY_INFO,
+	MSG_COUNTRY_CODES,
+};
+
+static const char * const usbpd_ext_msg_strings[] = {
+	"", "Source_Capabilities_Extended", "Status", "Get_Battery_Cap",
+	"Get_Battery_Status", "Get_Manufacturer_Info", "Manufacturer_Info",
+	"Security_Request", "Security_Response", "Firmware_Update_Request",
+	"Firmware_Update_Response", "PPS_Status", "Country_Info",
+	"Country_Codes",
+};
+
+static inline const char *msg_to_string(u8 id, bool is_data, bool is_ext)
+{
+	if (is_ext) {
+		if (id < ARRAY_SIZE(usbpd_ext_msg_strings))
+			return usbpd_ext_msg_strings[id];
+	} else if (is_data) {
+		if (id < ARRAY_SIZE(usbpd_data_msg_strings))
+			return usbpd_data_msg_strings[id];
+	} else if (id < ARRAY_SIZE(usbpd_control_msg_strings)) {
+		return usbpd_control_msg_strings[id];
+	}
+
+	return "Invalid";
+}
+
+enum vdm_state {
+	VDM_NONE,
+	DISCOVERED_ID,
+	DISCOVERED_SVIDS,
+	DISCOVERED_MODES,
+	MODE_ENTERED,
+	MODE_EXITED,
+};
+
+static void *usbpd_ipc_log;
+#define usbpd_dbg(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_dbg(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define usbpd_info(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_info(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define usbpd_warn(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_warn(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define usbpd_err(dev, fmt, ...) do { \
+	ipc_log_string(usbpd_ipc_log, "%s: %s: " fmt, dev_name(dev), __func__, \
+			##__VA_ARGS__); \
+	dev_err(dev, fmt, ##__VA_ARGS__); \
+	} while (0)
+
+#define NUM_LOG_PAGES		10
+
+/* Timeouts (in ms) */
+#define ERROR_RECOVERY_TIME	25
+#define SENDER_RESPONSE_TIME	26
+#define SINK_WAIT_CAP_TIME	500
+#define PS_TRANSITION_TIME	450
+#define SRC_CAP_TIME		120
+#define SRC_TRANSITION_TIME	25
+#define SRC_RECOVER_TIME	750
+#define PS_HARD_RESET_TIME	25
+#define PS_SOURCE_ON		400
+#define PS_SOURCE_OFF		750
+#define FIRST_SOURCE_CAP_TIME	200
+#define VDM_BUSY_TIME		50
+#define VCONN_ON_TIME		100
+
+/* tPSHardReset + tSafe0V */
+#define SNK_HARD_RESET_VBUS_OFF_TIME	(35 + 650)
+
+/* tSrcRecover + tSrcTurnOn */
+#define SNK_HARD_RESET_VBUS_ON_TIME	(1000 + 275)
+
+#define PD_CAPS_COUNT		50
+
+#define PD_MAX_MSG_ID		7
+
+#define PD_MAX_DATA_OBJ		7
+
+#define PD_SRC_CAP_EXT_DB_LEN	24
+#define PD_STATUS_DB_LEN	5
+#define PD_BATTERY_CAP_DB_LEN	9
+
+#define PD_MAX_EXT_MSG_LEN		260
+#define PD_MAX_EXT_MSG_LEGACY_LEN	26
+
+#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
+	(((type) & 0x1F) | ((dr) << 5) | (rev << 6) | \
+	 ((pr) << 8) | ((id) << 9) | ((cnt) << 12))
+#define PD_MSG_HDR_COUNT(hdr)		(((hdr) >> 12) & 7)
+#define PD_MSG_HDR_TYPE(hdr)		((hdr) & 0x1F)
+#define PD_MSG_HDR_ID(hdr)		(((hdr) >> 9) & 7)
+#define PD_MSG_HDR_REV(hdr)		(((hdr) >> 6) & 3)
+#define PD_MSG_HDR_EXTENDED		BIT(15)
+#define PD_MSG_HDR_IS_EXTENDED(hdr)	((hdr) & PD_MSG_HDR_EXTENDED)
+
+#define PD_MSG_EXT_HDR(chunked, num, req, size) \
+	(((chunked) << 15) | (((num) & 0xF) << 11) | \
+	 ((req) << 10) | ((size) & 0x1FF))
+#define PD_MSG_EXT_HDR_IS_CHUNKED(ehdr)	((ehdr) & 0x8000)
+#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr)	(((ehdr) >> 11) & 0xF)
+#define PD_MSG_EXT_HDR_REQ_CHUNK(ehdr)	((ehdr) & 0x400)
+#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr)	((ehdr) & 0x1FF)
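+/*
+ * Worked example: the extended header for chunk 0 of a chunked 24-byte
+ * message is PD_MSG_EXT_HDR(1, 0, 0, 24) = 0x8000 | 24 = 0x8018.
+ */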
+
+#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
+		(((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
+		 ((usb_comm) << 25) | ((no_usb_susp) << 24) | \
+		 ((curr1) << 10) | (curr2))
+
+#define PD_RDO_AUGMENTED(obj, mismatch, usb_comm, no_usb_susp, volt, curr) \
+		(((obj) << 28) | ((mismatch) << 26) | ((usb_comm) << 25) | \
+		 ((no_usb_susp) << 24) | ((volt) << 9) | (curr))
+
+#define PD_RDO_OBJ_POS(rdo)		((rdo) >> 28 & 7)
+#define PD_RDO_GIVEBACK(rdo)		((rdo) >> 27 & 1)
+#define PD_RDO_MISMATCH(rdo)		((rdo) >> 26 & 1)
+#define PD_RDO_USB_COMM(rdo)		((rdo) >> 25 & 1)
+#define PD_RDO_NO_USB_SUSP(rdo)		((rdo) >> 24 & 1)
+#define PD_RDO_FIXED_CURR(rdo)		((rdo) >> 10 & 0x3FF)
+#define PD_RDO_FIXED_CURR_MINMAX(rdo)	((rdo) & 0x3FF)
+#define PD_RDO_PROG_VOLTAGE(rdo)	((rdo) >> 9 & 0x7FF)
+#define PD_RDO_PROG_CURR(rdo)		((rdo) & 0x7F)
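+/*
+ * Worked example: a Fixed RDO selecting object position 1 at 3 A operating
+ * and maximum current (10 mA units), with USB comm and no-USB-suspend set,
+ * is PD_RDO_FIXED(1, 0, 0, 1, 1, 300, 300) = 0x1304B12C.
+ */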
+
+#define PD_SRC_PDO_TYPE(pdo)		(((pdo) >> 30) & 3)
+#define PD_SRC_PDO_TYPE_FIXED		0
+#define PD_SRC_PDO_TYPE_BATTERY		1
+#define PD_SRC_PDO_TYPE_VARIABLE	2
+#define PD_SRC_PDO_TYPE_AUGMENTED	3
+
+#define PD_SRC_PDO_FIXED_PR_SWAP(pdo)		(((pdo) >> 29) & 1)
+#define PD_SRC_PDO_FIXED_USB_SUSP(pdo)		(((pdo) >> 28) & 1)
+#define PD_SRC_PDO_FIXED_EXT_POWERED(pdo)	(((pdo) >> 27) & 1)
+#define PD_SRC_PDO_FIXED_USB_COMM(pdo)		(((pdo) >> 26) & 1)
+#define PD_SRC_PDO_FIXED_DR_SWAP(pdo)		(((pdo) >> 25) & 1)
+#define PD_SRC_PDO_FIXED_PEAK_CURR(pdo)		(((pdo) >> 20) & 3)
+#define PD_SRC_PDO_FIXED_VOLTAGE(pdo)		(((pdo) >> 10) & 0x3FF)
+#define PD_SRC_PDO_FIXED_MAX_CURR(pdo)		((pdo) & 0x3FF)
+
+#define PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo)	(((pdo) >> 20) & 0x3FF)
+#define PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo)	(((pdo) >> 10) & 0x3FF)
+#define PD_SRC_PDO_VAR_BATT_MAX(pdo)		((pdo) & 0x3FF)
+
+#define PD_APDO_PPS(pdo)			(((pdo) >> 28) & 3)
+#define PD_APDO_MAX_VOLT(pdo)			(((pdo) >> 17) & 0xFF)
+#define PD_APDO_MIN_VOLT(pdo)			(((pdo) >> 8) & 0xFF)
+#define PD_APDO_MAX_CURR(pdo)			((pdo) & 0x7F)
+
+/* Vendor Defined Messages */
+#define MAX_CRC_RECEIVE_TIME	9 /* ~(2 * tReceive_max(1.1ms) * # retry 4) */
+#define MAX_VDM_RESPONSE_TIME	60 /* 2 * tVDMSenderResponse_max(30ms) */
+#define MAX_VDM_BUSY_TIME	100 /* 2 * tVDMBusy (50ms) */
+
+#define PD_SNK_PDO_FIXED(prs, hc, uc, usb_comm, drs, volt, curr) \
+	(((prs) << 29) | ((hc) << 28) | ((uc) << 27) | ((usb_comm) << 26) | \
+	 ((drs) << 25) | ((volt) << 10) | (curr))
+
+/* VDM header is the first 32-bit object following the 16-bit PD header */
+#define VDM_HDR_SVID(hdr)	((hdr) >> 16)
+#define VDM_IS_SVDM(hdr)	((hdr) & 0x8000)
+#define SVDM_HDR_OBJ_POS(hdr)	(((hdr) >> 8) & 0x7)
+#define SVDM_HDR_CMD_TYPE(hdr)	(((hdr) >> 6) & 0x3)
+#define SVDM_HDR_CMD(hdr)	((hdr) & 0x1f)
+
+#define SVDM_HDR(svid, ver, obj, cmd_type, cmd) \
+	(((svid) << 16) | (1 << 15) | ((ver) << 13) \
+	| ((obj) << 8) | ((cmd_type) << 6) | (cmd))
+
+/* discover id response vdo bit fields */
+#define ID_HDR_USB_HOST		BIT(31)
+#define ID_HDR_USB_DEVICE	BIT(30)
+#define ID_HDR_MODAL_OPR	BIT(26)
+#define ID_HDR_PRODUCT_TYPE(n)	(((n) >> 27) & 0x7)
+#define ID_HDR_PRODUCT_PER_MASK	(2 << 27)
+#define ID_HDR_PRODUCT_HUB	1
+#define ID_HDR_PRODUCT_PER	2
+#define ID_HDR_PRODUCT_AMA	5
+#define ID_HDR_PRODUCT_VPD	6
+#define ID_HDR_VID		0x05c6 /* qcom */
+#define PROD_VDO_PID		0x0a00 /* TBD */
+
+#define PD_MIN_SINK_CURRENT	900
+
+static const u32 default_src_caps[] = { 0x36019096 };	/* VSafe5V @ 1.5A */
+static const u32 default_snk_caps[] = { 0x2601912C };	/* VSafe5V @ 3A */
+
+struct vdm_tx {
+	u32			data[PD_MAX_DATA_OBJ];
+	int			size;
+};
+
+struct rx_msg {
+	u16			hdr;
+	u16			data_len;	/* size of payload in bytes */
+	struct list_head	entry;
+	u8			payload[];
+};
+
+#define IS_DATA(m, t) ((m) && !PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+		PD_MSG_HDR_COUNT((m)->hdr) && \
+		(PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_CTRL(m, t) ((m) && !PD_MSG_HDR_COUNT((m)->hdr) && \
+		(PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_EXT(m, t) ((m) && PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+		(PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+
+struct usbpd {
+	struct device		dev;
+	struct workqueue_struct	*wq;
+	struct work_struct	sm_work;
+	struct work_struct	start_periph_work;
+	struct hrtimer		timer;
+	bool			sm_queued;
+
+	struct extcon_dev	*extcon;
+
+	enum usbpd_state	current_state;
+	bool			hard_reset_recvd;
+	ktime_t			hard_reset_recvd_time;
+	struct list_head	rx_q;
+	spinlock_t		rx_lock;
+	struct rx_msg		*rx_ext_msg;
+
+	u32			received_pdos[PD_MAX_DATA_OBJ];
+	u16			src_cap_id;
+	u8			selected_pdo;
+	u8			requested_pdo;
+	u32			rdo;	/* can be either source or sink */
+	int			current_voltage;	/* uV */
+	int			requested_voltage;	/* uV */
+	int			requested_current;	/* mA */
+	bool			pd_connected;
+	bool			in_explicit_contract;
+	bool			peer_usb_comm;
+	bool			peer_pr_swap;
+	bool			peer_dr_swap;
+
+	u32			sink_caps[7];
+	int			num_sink_caps;
+
+	struct power_supply	*usb_psy;
+	struct notifier_block	psy_nb;
+
+	enum power_supply_typec_mode typec_mode;
+	enum power_supply_typec_power_role forced_pr;
+	bool			vbus_present;
+
+	enum pd_spec_rev	spec_rev;
+	enum data_role		current_dr;
+	enum power_role		current_pr;
+	bool			in_pr_swap;
+	bool			pd_phy_opened;
+	bool			send_request;
+	struct completion	is_ready;
+	struct completion	tx_chunk_request;
+	u8			next_tx_chunk;
+
+	struct mutex		swap_lock;
+	struct dual_role_phy_instance	*dual_role;
+	struct dual_role_phy_desc	dr_desc;
+	bool			send_pr_swap;
+	bool			send_dr_swap;
+
+	struct regulator	*vbus;
+	struct regulator	*vconn;
+	bool			vbus_enabled;
+	bool			vconn_enabled;
+	bool			vconn_is_external;
+
+	u8			tx_msgid[SOPII_MSG + 1];
+	u8			rx_msgid[SOPII_MSG + 1];
+	int			caps_count;
+	int			hard_reset_count;
+
+	enum vdm_state		vdm_state;
+	u16			*discovered_svids;
+	int			num_svids;
+	struct vdm_tx		*vdm_tx;
+	struct vdm_tx		*vdm_tx_retry;
+	struct mutex		svid_handler_lock;
+	struct list_head	svid_handlers;
+
+	struct list_head	instance;
+
+	bool			has_dp;
+	u16			ss_lane_svid;
+
+	/* ext msg support */
+	bool			send_get_src_cap_ext;
+	u8			src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN];
+	bool			send_get_pps_status;
+	u32			pps_status_db;
+	bool			send_get_status;
+	u8			status_db[PD_STATUS_DB_LEN];
+	bool			send_get_battery_cap;
+	u8			get_battery_cap_db;
+	u8			battery_cap_db[PD_BATTERY_CAP_DB_LEN];
+	u8			get_battery_status_db;
+	bool			send_get_battery_status;
+	u32			battery_sts_dobj;
+};
+
+static LIST_HEAD(_usbpd);	/* useful for debugging */
+
+static const unsigned int usbpd_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_USB_HOST,
+	EXTCON_DISP_DP,
+	EXTCON_NONE,
+};
+
+struct usbpd_state_handler {
+	void (*enter_state)(struct usbpd *pd);
+	void (*handle_state)(struct usbpd *pd, struct rx_msg *msg);
+};
+
+static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type);
+static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state);
+static void handle_state_snk_wait_for_capabilities(struct usbpd *pd,
+	struct rx_msg *rx_msg);
+static void handle_state_prs_snk_src_source_on(struct usbpd *pd,
+	struct rx_msg *rx_msg);
+
+static const struct usbpd_state_handler state_handlers[];
+
+enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
+{
+	int ret;
+	union power_supply_propval val;
+
+	ret = power_supply_get_property(pd->usb_psy,
+		POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, &val);
+	if (ret)
+		return ORIENTATION_NONE;
+
+	return val.intval;
+}
+EXPORT_SYMBOL(usbpd_get_plug_orientation);
+
+static inline void stop_usb_host(struct usbpd *pd)
+{
+	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0);
+}
+
+static inline void start_usb_host(struct usbpd *pd, bool ss)
+{
+	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+	union extcon_property_value val;
+
+	val.intval = (cc == ORIENTATION_CC2);
+	extcon_set_property(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_TYPEC_POLARITY, val);
+
+	val.intval = ss;
+	extcon_set_property(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_SS, val);
+
+	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 1);
+}
+
+static inline void stop_usb_peripheral(struct usbpd *pd)
+{
+	extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
+}
+
+static inline void start_usb_peripheral(struct usbpd *pd)
+{
+	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+	union extcon_property_value val;
+
+	val.intval = (cc == ORIENTATION_CC2);
+	extcon_set_property(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_TYPEC_POLARITY, val);
+
+	val.intval = 1;
+	extcon_set_property(pd->extcon, EXTCON_USB, EXTCON_PROP_USB_SS, val);
+
+	val.intval = pd->typec_mode > POWER_SUPPLY_TYPEC_SOURCE_DEFAULT ? 1 : 0;
+	extcon_set_property(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT, val);
+
+	extcon_set_state_sync(pd->extcon, EXTCON_USB, 1);
+}
+
+static void start_usb_peripheral_work(struct work_struct *w)
+{
+	struct usbpd *pd = container_of(w, struct usbpd, start_periph_work);
+
+	pd->current_state = PE_SNK_STARTUP;
+	pd->current_pr = PR_SINK;
+	pd->current_dr = DR_UFP;
+	start_usb_peripheral(pd);
+	dual_role_instance_changed(pd->dual_role);
+}
+
+/**
+ * usbpd_release_ss_lane - request release of the USB SS lanes
+ * @pd: USBPD handler
+ * @hdlr: client's SVID handler
+ *
+ * Allows a client driver to request that the SS lanes be released, e.g. to
+ * switch to DP 4-lane mode. Must not be called from atomic context.
+ *
+ * Return: 0 on success, negative errno otherwise
+ */
+static int usbpd_release_ss_lane(struct usbpd *pd,
+				struct usbpd_svid_handler *hdlr)
+{
+	int ret = 0;
+
+	if (!hdlr || !pd)
+		return -EINVAL;
+
+	usbpd_dbg(&pd->dev, "hdlr:%pK svid:%d", hdlr, hdlr->svid);
+	/*
+	 * If the USB SS lanes are already in use by one client, return -EBUSY
+	 * whether another client or the same one requests them again.
+	 */
+	if (pd->ss_lane_svid) {
+		usbpd_dbg(&pd->dev, "-EBUSY: ss_lanes are already used by(%d)",
+							pd->ss_lane_svid);
+		ret = -EBUSY;
+		goto err_exit;
+	}
+
+	stop_usb_host(pd);
+
+	/* blocks until USB host is completely stopped */
+	ret = extcon_blocking_sync(pd->extcon, EXTCON_USB_HOST, 0);
+	if (ret) {
+		usbpd_err(&pd->dev, "err(%d) stopping host", ret);
+		goto err_exit;
+	}
+
+	if (pd->peer_usb_comm)
+		start_usb_host(pd, false);
+
+	pd->ss_lane_svid = hdlr->svid;
+
+	/* DP 4-lane mode */
+	ret = extcon_blocking_sync(pd->extcon, EXTCON_DISP_DP, 4);
+	if (ret) {
+		usbpd_err(&pd->dev, "err(%d) notifying DP 4-lane mode", ret);
+		goto err_exit;
+	}
+
+err_exit:
+	return ret;
+}
+
+static int set_power_role(struct usbpd *pd, enum power_role pr)
+{
+	union power_supply_propval val = {0};
+
+	switch (pr) {
+	case PR_NONE:
+		val.intval = POWER_SUPPLY_TYPEC_PR_NONE;
+		break;
+	case PR_SINK:
+		val.intval = POWER_SUPPLY_TYPEC_PR_SINK;
+		break;
+	case PR_SRC:
+		val.intval = POWER_SUPPLY_TYPEC_PR_SOURCE;
+		break;
+	}
+
+	return power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+}
+
+static struct usbpd_svid_handler *find_svid_handler(struct usbpd *pd, u16 svid)
+{
+	struct usbpd_svid_handler *handler;
+
+	mutex_lock(&pd->svid_handler_lock);
+	list_for_each_entry(handler, &pd->svid_handlers, entry) {
+		if (svid == handler->svid) {
+			mutex_unlock(&pd->svid_handler_lock);
+			return handler;
+		}
+	}
+
+	mutex_unlock(&pd->svid_handler_lock);
+	return NULL;
+}
+
+/* Reset protocol layer */
+static inline void pd_reset_protocol(struct usbpd *pd)
+{
+	/*
+	 * first Rx ID should be 0; set this to a sentinel of -1 so that in
+	 * phy_msg_received() we can check if we had seen it before.
+	 */
+	memset(pd->rx_msgid, -1, sizeof(pd->rx_msgid));
+	memset(pd->tx_msgid, 0, sizeof(pd->tx_msgid));
+	pd->send_request = false;
+	pd->send_pr_swap = false;
+	pd->send_dr_swap = false;
+}
+
+static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
+		size_t num_data, enum pd_sop_type sop)
+{
+	int ret;
+	u16 hdr;
+
+	if (pd->hard_reset_recvd)
+		return -EBUSY;
+
+	hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
+			pd->tx_msgid[sop], num_data, pd->spec_rev);
+
+	ret = pd_phy_write(hdr, (u8 *)data, num_data * sizeof(u32), sop);
+	if (ret) {
+		if (pd->pd_connected)
+			usbpd_err(&pd->dev, "Error sending %s: %d\n",
+					msg_to_string(msg_type, num_data,
+							false),
+					ret);
+		return ret;
+	}
+
+	pd->tx_msgid[sop] = (pd->tx_msgid[sop] + 1) & PD_MAX_MSG_ID;
+	return 0;
+}
+
+static int pd_send_ext_msg(struct usbpd *pd, u8 msg_type,
+		const u8 *data, size_t data_len, enum pd_sop_type sop)
+{
+	int ret;
+	size_t len_remain, chunk_len;
+	u8 chunked_payload[PD_MAX_DATA_OBJ * sizeof(u32)] = {0};
+	u16 hdr;
+	u16 ext_hdr;
+	u8 num_objs;
+
+	if (data_len > PD_MAX_EXT_MSG_LEN) {
+		usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+		data_len = PD_MAX_EXT_MSG_LEN;
+	}
+
+	pd->next_tx_chunk = 0;
+	len_remain = data_len;
+	do {
+		ext_hdr = PD_MSG_EXT_HDR(1, pd->next_tx_chunk++, 0, data_len);
+		memcpy(chunked_payload, &ext_hdr, sizeof(ext_hdr));
+
+		chunk_len = min_t(size_t, len_remain,
+				PD_MAX_EXT_MSG_LEGACY_LEN);
+		memcpy(chunked_payload + sizeof(ext_hdr), data, chunk_len);
+
+		num_objs = DIV_ROUND_UP(chunk_len + sizeof(u16), sizeof(u32));
+		len_remain -= chunk_len;
+
+		reinit_completion(&pd->tx_chunk_request);
+		hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
+				pd->tx_msgid[sop], num_objs, pd->spec_rev) |
+			PD_MSG_HDR_EXTENDED;
+		ret = pd_phy_write(hdr, chunked_payload,
+				num_objs * sizeof(u32), sop);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error sending %s: %d\n",
+					usbpd_ext_msg_strings[msg_type],
+					ret);
+			return ret;
+		}
+
+		pd->tx_msgid[sop] = (pd->tx_msgid[sop] + 1) & PD_MAX_MSG_ID;
+
+		/* Wait for request chunk */
+		if (len_remain &&
+			!wait_for_completion_timeout(&pd->tx_chunk_request,
+				msecs_to_jiffies(SENDER_RESPONSE_TIME))) {
+			usbpd_err(&pd->dev, "Timed out waiting for chunk request\n");
+			return -EPROTO;
+		}
+	} while (len_remain);
+
+	return 0;
+}
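+
+/*
+ * Worked example for the chunking above: a 40-byte payload goes out as
+ * chunk 0 carrying 26 bytes (num_objs = DIV_ROUND_UP(26 + 2, 4) = 7 data
+ * objects, counting the 2-byte extended header) and, after the peer's chunk
+ * request arrives, chunk 1 with the remaining 14 bytes
+ * (DIV_ROUND_UP(14 + 2, 4) = 4).
+ */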
+
+static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
+{
+	int curr;
+	int max_current;
+	bool mismatch = false;
+	u8 type;
+	u32 pdo = pd->received_pdos[pdo_pos - 1];
+
+	type = PD_SRC_PDO_TYPE(pdo);
+	if (type == PD_SRC_PDO_TYPE_FIXED) {
+		curr = max_current = PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10;
+
+		/*
+		 * Check if the PDO has enough current, otherwise set the
+		 * Capability Mismatch flag
+		 */
+		if (curr < PD_MIN_SINK_CURRENT) {
+			mismatch = true;
+			max_current = PD_MIN_SINK_CURRENT;
+		}
+
+		pd->requested_voltage =
+			PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50 * 1000;
+		pd->rdo = PD_RDO_FIXED(pdo_pos, 0, mismatch, 1, 1, curr / 10,
+				max_current / 10);
+	} else if (type == PD_SRC_PDO_TYPE_AUGMENTED) {
+		if ((uv / 100000) > PD_APDO_MAX_VOLT(pdo) ||
+			(uv / 100000) < PD_APDO_MIN_VOLT(pdo) ||
+			(ua / 50000) > PD_APDO_MAX_CURR(pdo) || (ua < 0)) {
+			usbpd_err(&pd->dev, "uv (%d) and ua (%d) out of range of APDO\n",
+					uv, ua);
+			return -EINVAL;
+		}
+
+		curr = ua / 1000;
+		pd->requested_voltage = uv;
+		pd->rdo = PD_RDO_AUGMENTED(pdo_pos, mismatch, 1, 1,
+				uv / 20000, ua / 50000);
+	} else {
+		usbpd_err(&pd->dev, "Only Fixed or Programmable PDOs supported\n");
+		return -ENOTSUPP;
+	}
+
+	/* Can't sink more than 5V if VCONN is sourced from the VBUS input */
+	if (pd->vconn_enabled && !pd->vconn_is_external &&
+			pd->requested_voltage > 5000000)
+		return -ENOTSUPP;
+
+	pd->requested_current = curr;
+	pd->requested_pdo = pdo_pos;
+
+	return 0;
+}
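+
+/*
+ * Unit conversions above, worked example: requesting 9 V / 2 A from a PPS
+ * APDO encodes the voltage as uv / 20000 = 9000000 / 20000 = 450 (20 mV
+ * units) and the current as ua / 50000 = 2000000 / 50000 = 40 (50 mA units).
+ */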
+
+static int pd_eval_src_caps(struct usbpd *pd)
+{
+	int i;
+	union power_supply_propval val;
+	bool pps_found = false;
+	u32 first_pdo = pd->received_pdos[0];
+
+	if (PD_SRC_PDO_TYPE(first_pdo) != PD_SRC_PDO_TYPE_FIXED) {
+		usbpd_err(&pd->dev, "First src_cap invalid %08x\n", first_pdo);
+		return -EINVAL;
+	}
+
+	pd->peer_usb_comm = PD_SRC_PDO_FIXED_USB_COMM(first_pdo);
+	pd->peer_pr_swap = PD_SRC_PDO_FIXED_PR_SWAP(first_pdo);
+	pd->peer_dr_swap = PD_SRC_PDO_FIXED_DR_SWAP(first_pdo);
+
+	val.intval = PD_SRC_PDO_FIXED_USB_SUSP(first_pdo);
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED, &val);
+
+	/* First time connecting to a PD source and it supports USB data */
+	if (pd->peer_usb_comm && pd->current_dr == DR_UFP && !pd->pd_connected)
+		start_usb_peripheral(pd);
+
+	/* Check for PPS APDOs */
+	if (pd->spec_rev == USBPD_REV_30) {
+		for (i = 1; i < PD_MAX_DATA_OBJ; i++) {
+			if ((PD_SRC_PDO_TYPE(pd->received_pdos[i]) ==
+					PD_SRC_PDO_TYPE_AUGMENTED) &&
+				!PD_APDO_PPS(pd->received_pdos[i])) {
+				pps_found = true;
+				break;
+			}
+		}
+
+		/* downgrade to 2.0 if no PPS */
+		if (!pps_found)
+			pd->spec_rev = USBPD_REV_20;
+	}
+
+	val.intval = pps_found ?
+			POWER_SUPPLY_PD_PPS_ACTIVE :
+			POWER_SUPPLY_PD_ACTIVE;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
+	/* Select the first PDO (vSafe5V) immediately. */
+	pd_select_pdo(pd, 1, 0, 0);
+
+	return 0;
+}
+
+static void pd_send_hard_reset(struct usbpd *pd)
+{
+	union power_supply_propval val = {0};
+
+	usbpd_dbg(&pd->dev, "send hard reset");
+
+	pd->hard_reset_count++;
+	pd_phy_signal(HARD_RESET_SIG);
+	pd->in_pr_swap = false;
+	power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
+}
+
+static void kick_sm(struct usbpd *pd, int ms)
+{
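+	/* a delayed kick arms the hrtimer; ms=0 queues the work immediately */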
+	pm_stay_awake(&pd->dev);
+	pd->sm_queued = true;
+
+	if (ms)
+		hrtimer_start(&pd->timer, ms_to_ktime(ms), HRTIMER_MODE_REL);
+	else
+		queue_work(pd->wq, &pd->sm_work);
+}
+
+static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
+{
+	union power_supply_propval val = {1};
+
+	if (sig != HARD_RESET_SIG) {
+		usbpd_err(&pd->dev, "invalid signal (%d) received\n", sig);
+		return;
+	}
+
+	pd->hard_reset_recvd = true;
+	pd->hard_reset_recvd_time = ktime_get();
+
+	usbpd_err(&pd->dev, "hard reset received\n");
+
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+	kick_sm(pd, 0);
+}
+
+struct pd_request_chunk {
+	struct work_struct	w;
+	struct usbpd		*pd;
+	u8			msg_type;
+	u8			chunk_num;
+	enum pd_sop_type	sop;
+};
+
+static void pd_request_chunk_work(struct work_struct *w)
+{
+	struct pd_request_chunk *req =
+		container_of(w, struct pd_request_chunk, w);
+	struct usbpd *pd = req->pd;
+	unsigned long flags;
+	int ret;
+	u8 payload[4] = {0}; /* ext_hdr + padding */
+	u16 hdr = PD_MSG_HDR(req->msg_type, pd->current_dr, pd->current_pr,
+				pd->tx_msgid[req->sop], 1, pd->spec_rev)
+		| PD_MSG_HDR_EXTENDED;
+
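+	/*
+	 * a chunk request is a single-object extended message with the
+	 * Request Chunk bit set, the number of the next chunk we want,
+	 * and a data size of zero
+	 */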
+	*(u16 *)payload = PD_MSG_EXT_HDR(1, req->chunk_num, 1, 0);
+
+	ret = pd_phy_write(hdr, payload, sizeof(payload), req->sop);
+	if (!ret) {
+		pd->tx_msgid[req->sop] =
+			(pd->tx_msgid[req->sop] + 1) & PD_MAX_MSG_ID;
+	} else {
+		usbpd_err(&pd->dev, "could not send chunk request\n");
+
+		/* queue what we have anyway */
+		spin_lock_irqsave(&pd->rx_lock, flags);
+		list_add_tail(&pd->rx_ext_msg->entry, &pd->rx_q);
+		spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+		pd->rx_ext_msg = NULL;
+	}
+
+	kfree(req);
+}
+
+static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
+		size_t len, enum pd_sop_type sop)
+{
+	struct rx_msg *rx_msg;
+	u16 bytes_to_copy;
+	u16 ext_hdr = *(u16 *)buf;
+	u8 chunk_num;
+
+	if (!PD_MSG_EXT_HDR_IS_CHUNKED(ext_hdr)) {
+		usbpd_err(&pd->dev, "unchunked extended messages unsupported\n");
+		return NULL;
+	}
+
+	/* request for next Tx chunk */
+	if (PD_MSG_EXT_HDR_REQ_CHUNK(ext_hdr)) {
+		if (PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr) ||
+			PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr) !=
+				pd->next_tx_chunk) {
+			usbpd_err(&pd->dev, "invalid request chunk ext header 0x%04x\n",
+					ext_hdr);
+			return NULL;
+		}
+
+		if (!completion_done(&pd->tx_chunk_request))
+			complete(&pd->tx_chunk_request);
+
+		return NULL;
+	}
+
+	chunk_num = PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr);
+	if (!chunk_num) {
+		/* allocate new message if first chunk */
+		rx_msg = kzalloc(sizeof(*rx_msg) +
+				PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr),
+				GFP_ATOMIC);
+		if (!rx_msg)
+			return NULL;
+
+		rx_msg->hdr = header;
+		rx_msg->data_len = PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr);
+
+		if (rx_msg->data_len > PD_MAX_EXT_MSG_LEN) {
+			usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+			rx_msg->data_len = PD_MAX_EXT_MSG_LEN;
+		}
+	} else {
+		if (!pd->rx_ext_msg) {
+			usbpd_err(&pd->dev, "missing first rx_ext_msg chunk\n");
+			return NULL;
+		}
+
+		rx_msg = pd->rx_ext_msg;
+	}
+
+	/*
+	 * The amount to copy is derived as follows:
+	 *
+	 * - if extended data_len < 26, then copy data_len bytes
+	 * - for chunks 0..N-2, copy 26 bytes
+	 * - for the last chunk (N-1), copy the remainder
+	 */
+	bytes_to_copy =
+		min((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN),
+			PD_MAX_EXT_MSG_LEGACY_LEN);
+
+	/* check against received length to avoid overrun */
+	if (bytes_to_copy > len - sizeof(ext_hdr)) {
+		usbpd_warn(&pd->dev, "not enough bytes in chunk, expected:%u received:%zu\n",
+			bytes_to_copy, len - sizeof(ext_hdr));
+		bytes_to_copy = len - sizeof(ext_hdr);
+	}
+
+	memcpy(rx_msg->payload + chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN, buf + 2,
+			bytes_to_copy);
+
+	/* request next chunk? */
+	if ((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN) >
+			PD_MAX_EXT_MSG_LEGACY_LEN) {
+		struct pd_request_chunk *req;
+
+		if (pd->rx_ext_msg && pd->rx_ext_msg != rx_msg) {
+			usbpd_dbg(&pd->dev, "stale previous rx_ext_msg?\n");
+			kfree(pd->rx_ext_msg);
+		}
+
+		pd->rx_ext_msg = rx_msg;
+
+		req = kzalloc(sizeof(*req), GFP_ATOMIC);
+		if (!req)
+			goto queue_rx; /* return what we have anyway */
+
+		INIT_WORK(&req->w, pd_request_chunk_work);
+		req->pd = pd;
+		req->msg_type = PD_MSG_HDR_TYPE(header);
+		req->chunk_num = chunk_num + 1;
+		req->sop = sop;
+		queue_work(pd->wq, &req->w);
+
+		return NULL;
+	}
+
+queue_rx:
+	pd->rx_ext_msg = NULL;
+	return rx_msg;	/* queue it for usbpd_sm */
+}
+
+static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
+		u8 *buf, size_t len)
+{
+	struct rx_msg *rx_msg;
+	unsigned long flags;
+	u16 header;
+	u8 msg_type, num_objs;
+
+	if (sop == SOPII_MSG) {
+		usbpd_err(&pd->dev, "only SOP/SOP' supported\n");
+		return;
+	}
+
+	if (len < 2) {
+		usbpd_err(&pd->dev, "invalid message received, len=%zd\n", len);
+		return;
+	}
+
+	header = *((u16 *)buf);
+	buf += sizeof(u16);
+	len -= sizeof(u16);
+
+	if (len % 4 != 0) {
+		usbpd_err(&pd->dev, "len=%zd not multiple of 4\n", len);
+		return;
+	}
+
+	/* if MSGID already seen, discard */
+	if (PD_MSG_HDR_ID(header) == pd->rx_msgid[sop] &&
+			PD_MSG_HDR_TYPE(header) != MSG_SOFT_RESET) {
+		usbpd_dbg(&pd->dev, "MessageID already seen, discarding\n");
+		return;
+	}
+
+	pd->rx_msgid[sop] = PD_MSG_HDR_ID(header);
+
+	/* discard Pings */
+	if (PD_MSG_HDR_TYPE(header) == MSG_PING && !len)
+		return;
+
+	/* check header's count field to see if it matches len */
+	if (PD_MSG_HDR_COUNT(header) != (len / 4)) {
+		usbpd_err(&pd->dev, "header count (%d) mismatch, len=%zd\n",
+				PD_MSG_HDR_COUNT(header), len);
+		return;
+	}
+
+	/* if the peer's spec rev is older, downgrade ours to match */
+	if (PD_MSG_HDR_REV(header) < pd->spec_rev)
+		pd->spec_rev = PD_MSG_HDR_REV(header);
+
+	msg_type = PD_MSG_HDR_TYPE(header);
+	num_objs = PD_MSG_HDR_COUNT(header);
+	usbpd_dbg(&pd->dev, "%s type(%d) num_objs(%d)\n",
+			msg_to_string(msg_type, num_objs,
+				PD_MSG_HDR_IS_EXTENDED(header)),
+			msg_type, num_objs);
+
+	if (!PD_MSG_HDR_IS_EXTENDED(header)) {
+		rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_ATOMIC);
+		if (!rx_msg)
+			return;
+
+		rx_msg->hdr = header;
+		rx_msg->data_len = len;
+		memcpy(rx_msg->payload, buf, len);
+	} else {
+		rx_msg = pd_ext_msg_received(pd, header, buf, len, sop);
+		if (!rx_msg)
+			return;
+	}
+
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	list_add_tail(&rx_msg->entry, &pd->rx_q);
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+	kick_sm(pd, 0);
+}
+
+static void phy_shutdown(struct usbpd *pd)
+{
+	usbpd_dbg(&pd->dev, "shutdown");
+
+	if (pd->vconn_enabled) {
+		regulator_disable(pd->vconn);
+		pd->vconn_enabled = false;
+	}
+
+	if (pd->vbus_enabled) {
+		regulator_disable(pd->vbus);
+		pd->vbus_enabled = false;
+	}
+}
+
+static enum hrtimer_restart pd_timeout(struct hrtimer *timer)
+{
+	struct usbpd *pd = container_of(timer, struct usbpd, timer);
+
+	usbpd_dbg(&pd->dev, "timeout");
+	queue_work(pd->wq, &pd->sm_work);
+
+	return HRTIMER_NORESTART;
+}
+
+static void log_decoded_request(struct usbpd *pd, u32 rdo)
+{
+	const u32 *pdos;
+	int pos = PD_RDO_OBJ_POS(rdo);
+	int type;
+
+	usbpd_dbg(&pd->dev, "RDO: 0x%08x\n", rdo);
+
+	if (pd->current_pr == PR_SINK)
+		pdos = pd->received_pdos;
+	else
+		pdos = default_src_caps;
+
+	type = PD_SRC_PDO_TYPE(pdos[pos - 1]);
+
+	switch (type) {
+	case PD_SRC_PDO_TYPE_FIXED:
+	case PD_SRC_PDO_TYPE_VARIABLE:
+		usbpd_dbg(&pd->dev, "Request Fixed/Variable PDO:%d Volt:%dmV OpCurr:%dmA Curr:%dmA\n",
+				pos,
+				PD_SRC_PDO_FIXED_VOLTAGE(pdos[pos - 1]) * 50,
+				PD_RDO_FIXED_CURR(rdo) * 10,
+				PD_RDO_FIXED_CURR_MINMAX(rdo) * 10);
+		break;
+
+	case PD_SRC_PDO_TYPE_BATTERY:
+		usbpd_dbg(&pd->dev, "Request Battery PDO:%d OpPow:%dmW Pow:%dmW\n",
+				pos,
+				PD_RDO_FIXED_CURR(rdo) * 250,
+				PD_RDO_FIXED_CURR_MINMAX(rdo) * 250);
+		break;
+
+	case PD_SRC_PDO_TYPE_AUGMENTED:
+		usbpd_dbg(&pd->dev, "Request PPS PDO:%d Volt:%dmV Curr:%dmA\n",
+				pos,
+				PD_RDO_PROG_VOLTAGE(rdo) * 20,
+				PD_RDO_PROG_CURR(rdo) * 50);
+		break;
+	}
+}
+
+int usbpd_register_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
+{
+	if (find_svid_handler(pd, hdlr->svid)) {
+		usbpd_err(&pd->dev, "SVID 0x%04x already registered\n",
+				hdlr->svid);
+		return -EINVAL;
+	}
+
+	/* require connect/disconnect callbacks be implemented */
+	if (!hdlr->connect || !hdlr->disconnect) {
+		usbpd_err(&pd->dev, "SVID 0x%04x connect/disconnect must be non-NULL\n",
+				hdlr->svid);
+		return -EINVAL;
+	}
+
+	usbpd_dbg(&pd->dev, "registered handler(%pK) for SVID 0x%04x\n",
+							hdlr, hdlr->svid);
+	mutex_lock(&pd->svid_handler_lock);
+	list_add_tail(&hdlr->entry, &pd->svid_handlers);
+	mutex_unlock(&pd->svid_handler_lock);
+	hdlr->request_usb_ss_lane = usbpd_release_ss_lane;
+
+	/* already connected with this SVID discovered? */
+	if (pd->vdm_state >= DISCOVERED_SVIDS) {
+		int i;
+
+		for (i = 0; i < pd->num_svids; i++) {
+			if (pd->discovered_svids[i] == hdlr->svid) {
+				hdlr->connect(hdlr);
+				hdlr->discovered = true;
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usbpd_register_svid);
+
+void usbpd_unregister_svid(struct usbpd *pd, struct usbpd_svid_handler *hdlr)
+{
+	usbpd_dbg(&pd->dev, "unregistered handler(%pK) for SVID 0x%04x\n",
+							hdlr, hdlr->svid);
+	mutex_lock(&pd->svid_handler_lock);
+	list_del_init(&hdlr->entry);
+	mutex_unlock(&pd->svid_handler_lock);
+}
+EXPORT_SYMBOL(usbpd_unregister_svid);
+
+int usbpd_send_vdm(struct usbpd *pd, u32 vdm_hdr, const u32 *vdos, int num_vdos)
+{
+	struct vdm_tx *vdm_tx;
+
+	if (pd->vdm_tx)
+		return -EBUSY;
+
+	vdm_tx = kzalloc(sizeof(*vdm_tx), GFP_KERNEL);
+	if (!vdm_tx)
+		return -ENOMEM;
+
+	vdm_tx->data[0] = vdm_hdr;
+	if (vdos && num_vdos)
+		memcpy(&vdm_tx->data[1], vdos, num_vdos * sizeof(u32));
+	vdm_tx->size = num_vdos + 1; /* include the header */
+
+	/* VDM will get sent in PE_SRC/SNK_READY state handling */
+	pd->vdm_tx = vdm_tx;
+
+	/* slight delay before queuing to prioritize handling of incoming VDM */
+	if (pd->in_explicit_contract)
+		kick_sm(pd, 2);
+
+	return 0;
+}
+EXPORT_SYMBOL(usbpd_send_vdm);
+
+int usbpd_send_svdm(struct usbpd *pd, u16 svid, u8 cmd,
+		enum usbpd_svdm_cmd_type cmd_type, int obj_pos,
+		const u32 *vdos, int num_vdos)
+{
+	u32 svdm_hdr = SVDM_HDR(svid, 0, obj_pos, cmd_type, cmd);
+
+	usbpd_dbg(&pd->dev, "VDM tx: svid:%x cmd:%x cmd_type:%x svdm_hdr:%x\n",
+			svid, cmd, cmd_type, svdm_hdr);
+
+	return usbpd_send_vdm(pd, svdm_hdr, vdos, num_vdos);
+}
+EXPORT_SYMBOL(usbpd_send_svdm);
+
+static void handle_vdm_resp_ack(struct usbpd *pd, u32 *vdos, u8 num_vdos,
+	u8 cmd)
+{
+	int ret, i;
+	u16 svid, *psvid;
+	struct usbpd_svid_handler *handler;
+
+	switch (cmd) {
+	case USBPD_SVDM_DISCOVER_IDENTITY:
+		kfree(pd->vdm_tx_retry);
+		pd->vdm_tx_retry = NULL;
+
+		if (num_vdos && ID_HDR_PRODUCT_TYPE(vdos[0]) ==
+				ID_HDR_PRODUCT_VPD) {
+
+			usbpd_dbg(&pd->dev, "VPD detected turn off vbus\n");
+
+			if (pd->vbus_enabled) {
+				ret = regulator_disable(pd->vbus);
+				if (ret)
+					usbpd_err(&pd->dev, "Err disabling vbus (%d)\n",
+							ret);
+				else
+					pd->vbus_enabled = false;
+			}
+		}
+
+		if (!pd->in_explicit_contract)
+			break;
+
+		pd->vdm_state = DISCOVERED_ID;
+		usbpd_send_svdm(pd, USBPD_SID,
+				USBPD_SVDM_DISCOVER_SVIDS,
+				SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+		break;
+
+	case USBPD_SVDM_DISCOVER_SVIDS:
+		pd->vdm_state = DISCOVERED_SVIDS;
+
+		kfree(pd->vdm_tx_retry);
+		pd->vdm_tx_retry = NULL;
+
+		if (!pd->discovered_svids) {
+			pd->num_svids = 2 * num_vdos;
+			pd->discovered_svids = kcalloc(pd->num_svids,
+							sizeof(u16),
+							GFP_KERNEL);
+			if (!pd->discovered_svids)
+				break;
+
+			psvid = pd->discovered_svids;
+		} else { /* handle > 12 SVIDs */
+			void *ptr;
+			size_t oldsize = pd->num_svids * sizeof(u16);
+			size_t newsize = oldsize +
+					(2 * num_vdos * sizeof(u16));
+
+			ptr = krealloc(pd->discovered_svids, newsize,
+					GFP_KERNEL);
+			if (!ptr)
+				break;
+
+			pd->discovered_svids = ptr;
+			psvid = pd->discovered_svids + pd->num_svids;
+			memset(psvid, 0, 2 * num_vdos * sizeof(u16));
+			pd->num_svids += 2 * num_vdos;
+		}
+
+		/* convert 32-bit VDOs to list of 16-bit SVIDs */
+		for (i = 0; i < num_vdos * 2; i++) {
+			/*
+			 * Within each 32-bit VDO,
+			 *    SVID[i]: upper 16-bits
+			 *    SVID[i+1]: lower 16-bits
+			 * where i is even.
+			 */
+			if (!(i & 1))
+				svid = vdos[i >> 1] >> 16;
+			else
+				svid = vdos[i >> 1] & 0xFFFF;
+
+			/*
+			 * There are some devices that incorrectly
+			 * swap the order of SVIDs within a VDO. So in
+			 * case of an odd-number of SVIDs it could end
+			 * up with SVID[i] as 0 while SVID[i+1] is
+			 * non-zero. Just skip over the zero ones.
+			 */
+			if (svid) {
+				usbpd_dbg(&pd->dev, "Discovered SVID: 0x%04x\n",
+						svid);
+				*psvid++ = svid;
+			}
+		}
+
+		/* if more than 12 SVIDs, resend the request */
+		if (num_vdos == 6 && vdos[5] != 0) {
+			usbpd_send_svdm(pd, USBPD_SID,
+					USBPD_SVDM_DISCOVER_SVIDS,
+					SVDM_CMD_TYPE_INITIATOR, 0,
+					NULL, 0);
+			break;
+		}
+
+		/* now that all SVIDs are discovered, notify handlers */
+		for (i = 0; i < pd->num_svids; i++) {
+			svid = pd->discovered_svids[i];
+			if (svid) {
+				handler = find_svid_handler(pd, svid);
+				if (handler) {
+					handler->connect(handler);
+					handler->discovered = true;
+				}
+			}
+		}
+		break;
+
+	default:
+		usbpd_dbg(&pd->dev, "unhandled ACK for command:0x%x\n",
+				cmd);
+		break;
+	}
+
+}
+
+{
+	u32 vdm_hdr = rx_msg->data_len >= sizeof(u32) ?
+			((u32 *)rx_msg->payload)[0] : 0;
+	u32 *vdos = (u32 *)&rx_msg->payload[sizeof(u32)];
+	u16 svid = VDM_HDR_SVID(vdm_hdr);
+	u8 num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1;
+	u8 cmd = SVDM_HDR_CMD(vdm_hdr);
+	u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
+	struct usbpd_svid_handler *handler;
+
+	usbpd_dbg(&pd->dev,
+			"VDM rx: svid:%x cmd:%x cmd_type:%x vdm_hdr:%x has_dp: %s\n",
+			svid, cmd, cmd_type, vdm_hdr,
+			pd->has_dp ? "true" : "false");
+
+	if ((svid == 0xFF01) && (!pd->has_dp)) {
+		pd->has_dp = true;
+
+		/* Set to USB and DP concurrency mode */
+		extcon_blocking_sync(pd->extcon, EXTCON_DISP_DP, 2);
+	}
+
+	/* if it's a supported SVID, pass the message to the handler */
+	handler = find_svid_handler(pd, svid);
+
+	/* Unstructured VDM */
+	if (!VDM_IS_SVDM(vdm_hdr)) {
+		if (handler && handler->vdm_received)
+			handler->vdm_received(handler, vdm_hdr, vdos, num_vdos);
+		return;
+	}
+
+	/* if this interrupts a previous exchange, abort queued response */
+	if (cmd_type == SVDM_CMD_TYPE_INITIATOR && pd->vdm_tx) {
+		usbpd_dbg(&pd->dev, "Discarding previously queued SVDM tx (SVID:0x%04x)\n",
+				VDM_HDR_SVID(pd->vdm_tx->data[0]));
+
+		kfree(pd->vdm_tx);
+		pd->vdm_tx = NULL;
+	}
+
+	if (handler && handler->svdm_received) {
+		handler->svdm_received(handler, cmd, cmd_type, vdos, num_vdos);
+		return;
+	}
+
+	/* Standard Discovery or unhandled messages go here */
+	switch (cmd_type) {
+	case SVDM_CMD_TYPE_INITIATOR:
+		if (svid == USBPD_SID && cmd == USBPD_SVDM_DISCOVER_IDENTITY) {
+			u32 tx_vdos[3] = {
+				ID_HDR_USB_HOST | ID_HDR_USB_DEVICE |
+					ID_HDR_PRODUCT_PER_MASK | ID_HDR_VID,
+				0x0, /* TBD: Cert Stat VDO */
+				(PROD_VDO_PID << 16),
+				/* TBD: Get these from gadget */
+			};
+
+			usbpd_send_svdm(pd, USBPD_SID, cmd,
+					SVDM_CMD_TYPE_RESP_ACK, 0, tx_vdos, 3);
+		} else if (cmd != USBPD_SVDM_ATTENTION) {
+			usbpd_send_svdm(pd, svid, cmd, SVDM_CMD_TYPE_RESP_NAK,
+					SVDM_HDR_OBJ_POS(vdm_hdr), NULL, 0);
+		}
+		break;
+
+	case SVDM_CMD_TYPE_RESP_ACK:
+		if (svid != USBPD_SID) {
+			usbpd_err(&pd->dev, "unhandled ACK for SVID:0x%x\n",
+					svid);
+			break;
+		}
+
+		handle_vdm_resp_ack(pd, vdos, num_vdos, cmd);
+		break;
+
+	case SVDM_CMD_TYPE_RESP_NAK:
+		usbpd_info(&pd->dev, "VDM NAK received for SVID:0x%04x command:0x%x\n",
+				svid, cmd);
+		break;
+
+	case SVDM_CMD_TYPE_RESP_BUSY:
+		switch (cmd) {
+		case USBPD_SVDM_DISCOVER_IDENTITY:
+		case USBPD_SVDM_DISCOVER_SVIDS:
+			if (!pd->vdm_tx_retry) {
+				usbpd_err(&pd->dev, "Discover command %d VDM was unexpectedly freed\n",
+						cmd);
+				break;
+			}
+
+			/* wait tVDMBusy, then retry */
+			pd->vdm_tx = pd->vdm_tx_retry;
+			pd->vdm_tx_retry = NULL;
+			kick_sm(pd, VDM_BUSY_TIME);
+			break;
+		default:
+			break;
+		}
+		break;
+	}
+}
+
+static void handle_vdm_tx(struct usbpd *pd, enum pd_sop_type sop_type)
+{
+	int ret;
+	unsigned long flags;
+
+	/* only send one VDM at a time */
+	if (pd->vdm_tx) {
+		u32 vdm_hdr = pd->vdm_tx->data[0];
+
+		/* bail out and try again later if a message just arrived */
+		spin_lock_irqsave(&pd->rx_lock, flags);
+		if (!list_empty(&pd->rx_q)) {
+			spin_unlock_irqrestore(&pd->rx_lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+		ret = pd_send_msg(pd, MSG_VDM, pd->vdm_tx->data,
+				pd->vdm_tx->size, sop_type);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error (%d) sending VDM command %d\n",
+					ret, SVDM_HDR_CMD(pd->vdm_tx->data[0]));
+
+			/* retry when hitting PE_SRC/SNK_Ready again */
+			if (ret != -EBUSY && sop_type == SOP_MSG)
+				usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+
+			return;
+		}
+
+		/*
+		 * special case: keep initiated Discover ID/SVIDs
+		 * around in case we need to re-try when receiving BUSY
+		 */
+		if (VDM_IS_SVDM(vdm_hdr) &&
+			SVDM_HDR_CMD_TYPE(vdm_hdr) == SVDM_CMD_TYPE_INITIATOR &&
+			SVDM_HDR_CMD(vdm_hdr) <= USBPD_SVDM_DISCOVER_SVIDS) {
+			if (pd->vdm_tx_retry) {
+				usbpd_dbg(&pd->dev, "Previous Discover VDM command %d not ACKed/NAKed\n",
+					SVDM_HDR_CMD(
+						pd->vdm_tx_retry->data[0]));
+				kfree(pd->vdm_tx_retry);
+			}
+			pd->vdm_tx_retry = pd->vdm_tx;
+		} else {
+			kfree(pd->vdm_tx);
+		}
+
+		pd->vdm_tx = NULL;
+	}
+}
+
+static void reset_vdm_state(struct usbpd *pd)
+{
+	struct usbpd_svid_handler *handler;
+
+	mutex_lock(&pd->svid_handler_lock);
+	list_for_each_entry(handler, &pd->svid_handlers, entry) {
+		if (handler->discovered) {
+			handler->disconnect(handler);
+			handler->discovered = false;
+		}
+	}
+
+	mutex_unlock(&pd->svid_handler_lock);
+	pd->vdm_state = VDM_NONE;
+	kfree(pd->vdm_tx_retry);
+	pd->vdm_tx_retry = NULL;
+	kfree(pd->discovered_svids);
+	pd->discovered_svids = NULL;
+	pd->num_svids = 0;
+	kfree(pd->vdm_tx);
+	pd->vdm_tx = NULL;
+	pd->ss_lane_svid = 0x0;
+}
+
+static void dr_swap(struct usbpd *pd)
+{
+	reset_vdm_state(pd);
+	usbpd_dbg(&pd->dev, "%s: current_dr(%d)\n", __func__, pd->current_dr);
+
+	if (pd->current_dr == DR_DFP) {
+		stop_usb_host(pd);
+		if (pd->peer_usb_comm)
+			start_usb_peripheral(pd);
+		pd->current_dr = DR_UFP;
+	} else if (pd->current_dr == DR_UFP) {
+		stop_usb_peripheral(pd);
+		if (pd->peer_usb_comm)
+			start_usb_host(pd, true);
+		pd->current_dr = DR_DFP;
+
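+		/* as the new DFP, kick off SVDM discovery of the peer */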
+		usbpd_send_svdm(pd, USBPD_SID, USBPD_SVDM_DISCOVER_IDENTITY,
+				SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+	}
+
+	pd_phy_update_roles(pd->current_dr, pd->current_pr);
+	dual_role_instance_changed(pd->dual_role);
+}
+
+static void vconn_swap(struct usbpd *pd)
+{
+	int ret;
+
+	if (pd->vconn_enabled) {
+		pd_phy_update_frame_filter(FRAME_FILTER_EN_SOP |
+					   FRAME_FILTER_EN_HARD_RESET);
+
+		pd->current_state = PE_VCS_WAIT_FOR_VCONN;
+		kick_sm(pd, VCONN_ON_TIME);
+	} else {
+		if (!pd->vconn) {
+			pd->vconn = devm_regulator_get(pd->dev.parent, "vconn");
+			if (IS_ERR(pd->vconn)) {
+				usbpd_err(&pd->dev, "Unable to get vconn\n");
+				return;
+			}
+		}
+		ret = regulator_enable(pd->vconn);
+		if (ret) {
+			usbpd_err(&pd->dev, "Unable to enable vconn\n");
+			return;
+		}
+
+		pd->vconn_enabled = true;
+
+		pd_phy_update_frame_filter(FRAME_FILTER_EN_SOP |
+					   FRAME_FILTER_EN_SOPI |
+					   FRAME_FILTER_EN_HARD_RESET);
+
+		/*
+		 * Small delay to ensure Vconn has ramped up. This is well
+		 * below tVCONNSourceOn (100ms) so we still send PS_RDY within
+		 * the allowed time.
+		 */
+		usleep_range(5000, 10000);
+
+		ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+	}
+}
+
+static int enable_vbus(struct usbpd *pd)
+{
+	union power_supply_propval val = {0};
+	int count = 100;
+	int ret;
+
+	/*
+	 * Check to make sure there's no lingering charge on
+	 * VBUS before enabling it as a source. If so poll here
+	 * until it goes below VSafe0V (0.8V) before proceeding.
+	 */
+	while (count--) {
+		ret = power_supply_get_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+		if (ret || val.intval <= 800000)
+			break;
+		usleep_range(20000, 30000);
+	}
+
+	if (count < 99)
+		msleep(100);	/* need to wait an additional tCCDebounce */
+
+	if (!pd->vbus) {
+		pd->vbus = devm_regulator_get(pd->dev.parent, "vbus");
+		if (IS_ERR(pd->vbus)) {
+			usbpd_err(&pd->dev, "Unable to get vbus\n");
+			return -EAGAIN;
+		}
+	}
+	ret = regulator_enable(pd->vbus);
+	if (ret)
+		usbpd_err(&pd->dev, "Unable to enable vbus (%d)\n", ret);
+	else
+		pd->vbus_enabled = true;
+
+	count = 10;
+	/*
+	 * Check to make sure VBUS voltage reaches above Vsafe5Vmin (4.75v)
+	 * before proceeding.
+	 */
+	while (count--) {
+		ret = power_supply_get_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+		if (ret || val.intval >= 4750000) /* vSafe5Vmin */
+			break;
+		usleep_range(10000, 12000); /* Delay between two reads */
+	}
+
+	if (ret)
+		msleep(100); /* Delay to wait for VBUS ramp up if read fails */
+
+	return ret;
+}
+
+static inline void rx_msg_cleanup(struct usbpd *pd)
+{
+	struct rx_msg *msg, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	list_for_each_entry_safe(msg, tmp, &pd->rx_q, entry) {
+		list_del(&msg->entry);
+		kfree(msg);
+	}
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+}
+
+/*
+ * For PD 3.0, check SinkTxOk (the source presenting Rp at 3.0A) before
+ * allowing the sink to initiate an AMS
+ */
+static inline bool is_sink_tx_ok(struct usbpd *pd)
+{
+	if (pd->spec_rev == USBPD_REV_30)
+		return pd->typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH;
+
+	return true;
+}
+
+static void handle_state_unknown(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	union power_supply_propval val = {0};
+	int ret;
+
+	val.intval = 0;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+	if (pd->current_pr == PR_SINK) {
+		usbpd_set_state(pd, PE_SNK_STARTUP);
+	} else if (pd->current_pr == PR_SRC) {
+		if (!pd->vconn_enabled && pd->typec_mode ==
+				POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE) {
+			if (!pd->vconn) {
+				pd->vconn = devm_regulator_get(
+					pd->dev.parent, "vconn");
+				if (IS_ERR(pd->vconn)) {
+					usbpd_err(&pd->dev, "Unable to get vconn\n");
+					return;
+				}
+			}
+			ret = regulator_enable(pd->vconn);
+			if (ret)
+				usbpd_err(&pd->dev, "Unable to enable vconn\n");
+			else
+				pd->vconn_enabled = true;
+		}
+		enable_vbus(pd);
+
+		usbpd_set_state(pd, PE_SRC_STARTUP);
+	}
+}
+
+static void enter_state_error_recovery(struct usbpd *pd)
+{
+	/* perform hard disconnect/reconnect */
+	pd->in_pr_swap = false;
+	pd->current_pr = PR_NONE;
+	set_power_role(pd, PR_NONE);
+	pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
+	kick_sm(pd, 0);
+}
+
+static void enter_state_src_disabled(struct usbpd *pd)
+{
+	/* are we still connected? */
+	if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE) {
+		pd->current_pr = PR_NONE;
+		kick_sm(pd, 0);
+	}
+}
+
+static int usbpd_startup_common(struct usbpd *pd,
+	struct pd_phy_params *phy_params)
+{
+	int ret = 0;
+
+	pd_reset_protocol(pd);
+
+	if (!pd->in_pr_swap) {
+		if (pd->pd_phy_opened) {
+			pd_phy_close();
+			pd->pd_phy_opened = false;
+		}
+
+		phy_params->data_role = pd->current_dr;
+		phy_params->power_role = pd->current_pr;
+
+		if (pd->vconn_enabled)
+			phy_params->frame_filter_val |= FRAME_FILTER_EN_SOPI;
+
+		ret = pd_phy_open(phy_params);
+		if (ret) {
+			WARN_ON_ONCE(1);
+			usbpd_err(&pd->dev, "error opening PD PHY %d\n",
+					ret);
+			pd->current_state = PE_UNKNOWN;
+			return ret;
+		}
+
+		pd->pd_phy_opened = true;
+	}
+
+	return 0;
+}
+
+static void enter_state_src_startup(struct usbpd *pd)
+{
+	struct pd_phy_params phy_params = {
+		.signal_cb		= phy_sig_received,
+		.msg_rx_cb		= phy_msg_received,
+		.shutdown_cb		= phy_shutdown,
+		.frame_filter_val	= FRAME_FILTER_EN_SOP |
+					  FRAME_FILTER_EN_HARD_RESET,
+	};
+	union power_supply_propval val = {0};
+
+	if (pd->current_dr == DR_NONE) {
+		pd->current_dr = DR_DFP;
+		start_usb_host(pd, true);
+		pd->ss_lane_svid = 0x0;
+	}
+
+	dual_role_instance_changed(pd->dual_role);
+
+	/* Set CC back to DRP toggle for the next disconnect */
+	val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+
+	/* support only PD 2.0 as a source */
+	pd->spec_rev = USBPD_REV_20;
+
+	if (usbpd_startup_common(pd, &phy_params))
+		return;
+
+	if (pd->in_pr_swap) {
+		pd->in_pr_swap = false;
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PR_SWAP, &val);
+	}
+
+	if (pd->vconn_enabled) {
+		/*
+		 * wait for tVCONNStable (50ms), until SOPI becomes
+		 * ready for communication.
+		 */
+		usleep_range(50000, 51000);
+		usbpd_send_svdm(pd, USBPD_SID,
+				USBPD_SVDM_DISCOVER_IDENTITY,
+				SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+		handle_vdm_tx(pd, SOPI_MSG);
+		pd->current_state = PE_SRC_STARTUP_WAIT_FOR_VDM_RESP;
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+		return;
+	}
+	/*
+	 * A sink might remove its terminations (during some Type-C
+	 * compliance tests or a sink attempting to do Try.SRC)
+	 * at this point just after we enabled VBUS. Sending PD
+	 * messages now would delay detecting the detach beyond the
+	 * required timing. Instead, delay sending out the first
+	 * source capabilities to allow for the other side to
+	 * completely settle CC debounce and allow HW to detect detach
+	 * sooner in the meantime. PD spec allows up to
+	 * tFirstSourceCap (250ms).
+	 */
+	pd->current_state = PE_SRC_SEND_CAPABILITIES;
+	kick_sm(pd, FIRST_SOURCE_CAP_TIME);
+}
+
+static void handle_state_src_startup(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	usbpd_set_state(pd, PE_SRC_STARTUP);
+}
+
+static void handle_state_src_startup_wait_for_vdm_resp(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	int ms;
+
+	if (IS_DATA(rx_msg, MSG_VDM))
+		handle_vdm_rx(pd, rx_msg);
+
+	/* tVCONNStable (50ms) has already elapsed by this point */
+	ms = FIRST_SOURCE_CAP_TIME - 50;
+
+	/* if no VDM response arrived, SENDER_RESPONSE_TIME also elapsed */
+	if (!rx_msg)
+		ms -= SENDER_RESPONSE_TIME;
+
+	pd->current_state = PE_SRC_SEND_CAPABILITIES;
+	kick_sm(pd, ms);
+}
+
+static void enter_state_src_send_capabilities(struct usbpd *pd)
+{
+	kick_sm(pd, 0);
+}
+
+static void handle_state_src_send_capabilities(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	int ret;
+	union power_supply_propval val = {0};
+
+	ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
+			ARRAY_SIZE(default_src_caps), SOP_MSG);
+	if (ret) {
+		pd->caps_count++;
+		if (pd->caps_count >= PD_CAPS_COUNT) {
+			usbpd_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
+			usbpd_set_state(pd, PE_SRC_DISABLED);
+
+			val.intval = POWER_SUPPLY_PD_INACTIVE;
+			power_supply_set_property(pd->usb_psy,
+					POWER_SUPPLY_PROP_PD_ACTIVE,
+					&val);
+			return;
+		}
+
+		kick_sm(pd, SRC_CAP_TIME);
+		return;
+	}
+
+	/* transmit was successful if GoodCRC was received */
+	pd->caps_count = 0;
+	pd->hard_reset_count = 0;
+	pd->pd_connected = true; /* we know peer is PD capable */
+
+	/* wait for REQUEST */
+	pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
+	kick_sm(pd, SENDER_RESPONSE_TIME);
+
+	val.intval = POWER_SUPPLY_PD_ACTIVE;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+}
+
+static void handle_state_src_send_capabilities_wait(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (IS_DATA(rx_msg, MSG_REQUEST)) {
+		pd->rdo = *(u32 *)rx_msg->payload;
+		usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
+	} else if (rx_msg) {
+		usbpd_err(&pd->dev, "Unexpected message received\n");
+		usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+	} else {
+		usbpd_set_state(pd, PE_SRC_HARD_RESET);
+	}
+}
+
+static void enter_state_src_negotiate_capability(struct usbpd *pd)
+{
+	int ret;
+
+	log_decoded_request(pd, pd->rdo);
+	pd->peer_usb_comm = PD_RDO_USB_COMM(pd->rdo);
+
+	if (PD_RDO_OBJ_POS(pd->rdo) != 1 ||
+			PD_RDO_FIXED_CURR(pd->rdo) >
+				PD_SRC_PDO_FIXED_MAX_CURR(*default_src_caps)) {
+		/* send Reject */
+		ret = pd_send_msg(pd, MSG_REJECT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		usbpd_err(&pd->dev, "Invalid request: %08x\n", pd->rdo);
+
+		if (pd->in_explicit_contract)
+			usbpd_set_state(pd, PE_SRC_READY);
+		else
+			/*
+			 * bypass PE_SRC_Capability_Response and
+			 * PE_SRC_Wait_New_Capabilities in this
+			 * implementation for simplicity.
+			 */
+			usbpd_set_state(pd, PE_SRC_SEND_CAPABILITIES);
+		return;
+	}
+
+	/* PE_SRC_TRANSITION_SUPPLY pseudo-state */
+	ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+	if (ret) {
+		usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+		return;
+	}
+
+	/* tSrcTransition required after ACCEPT */
+	usleep_range(SRC_TRANSITION_TIME * USEC_PER_MSEC,
+			(SRC_TRANSITION_TIME + 5) * USEC_PER_MSEC);
+
+	/*
+	 * Normally a voltage change should occur within tSrcReady
+	 * but since we only support VSafe5V there is nothing more to
+	 * prepare from the power supply so send PS_RDY right away.
+	 */
+	ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+	if (ret) {
+		usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+		return;
+	}
+
+	usbpd_set_state(pd, PE_SRC_READY);
+}
+
+static void enter_state_src_ready(struct usbpd *pd)
+{
+	/*
+	 * USB Host stack was started at PE_SRC_STARTUP but if peer
+	 * doesn't support USB communication, we can turn it off
+	 */
+	if (pd->current_dr == DR_DFP && !pd->peer_usb_comm &&
+			!pd->in_explicit_contract)
+		stop_usb_host(pd);
+
+	pd->in_explicit_contract = true;
+
+	if (pd->vdm_tx)
+		kick_sm(pd, 0);
+	else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
+		usbpd_send_svdm(pd, USBPD_SID,
+				USBPD_SVDM_DISCOVER_IDENTITY,
+				SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+
+	kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+	complete(&pd->is_ready);
+	dual_role_instance_changed(pd->dual_role);
+}
+
+static void handle_state_src_ready(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	int ret;
+
+	if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
+		pd->current_state = PE_SRC_SEND_CAPABILITIES;
+		kick_sm(pd, 0);
+	} else if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
+		ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
+				pd->sink_caps, pd->num_sink_caps,
+				SOP_MSG);
+		if (ret)
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+	} else if (IS_DATA(rx_msg, MSG_REQUEST)) {
+		pd->rdo = *(u32 *)rx_msg->payload;
+		usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
+	} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
+		if (pd->vdm_state == MODE_ENTERED) {
+			usbpd_set_state(pd, PE_SRC_HARD_RESET);
+			return;
+		}
+
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		dr_swap(pd);
+	} else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
+		/* we'll happily accept Src->Sink requests anytime */
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		usbpd_set_state(pd, PE_PRS_SRC_SNK_TRANSITION_TO_OFF);
+		return;
+	} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		vconn_swap(pd);
+	} else if (IS_DATA(rx_msg, MSG_VDM)) {
+		handle_vdm_rx(pd, rx_msg);
+	} else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+		/* unhandled messages */
+		ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+				SOP_MSG);
+		if (ret)
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+		return;
+	} else if (pd->send_pr_swap) {
+		pd->send_pr_swap = false;
+		ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		pd->current_state = PE_PRS_SRC_SNK_SEND_SWAP;
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else if (pd->send_dr_swap) {
+		pd->send_dr_swap = false;
+		ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		pd->current_state = PE_DRS_SEND_DR_SWAP;
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else {
+		handle_vdm_tx(pd, SOP_MSG);
+	}
+}
+
+static void enter_state_hard_reset(struct usbpd *pd)
+{
+	/* are we still connected? */
+	if (pd->typec_mode == POWER_SUPPLY_TYPEC_NONE)
+		pd->current_pr = PR_NONE;
+
+	/* hard reset may sleep; handle it in the workqueue */
+	kick_sm(pd, 0);
+}
+
+static void handle_state_src_hard_reset(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	union power_supply_propval val = {0};
+
+	val.intval = 1;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+	pd_send_hard_reset(pd);
+	pd->in_explicit_contract = false;
+	pd->rdo = 0;
+	rx_msg_cleanup(pd);
+	reset_vdm_state(pd);
+	kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+
+	pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
+	kick_sm(pd, PS_HARD_RESET_TIME);
+}
+
+static void handle_state_soft_reset(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	int ret;
+
+	pd_reset_protocol(pd);
+
+	ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+	if (ret) {
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+		return;
+	}
+
+	usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+			PE_SRC_SEND_CAPABILITIES :
+			PE_SNK_WAIT_FOR_CAPABILITIES);
+}
+
+static void handle_state_src_transition_to_default(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (pd->vconn_enabled)
+		regulator_disable(pd->vconn);
+	pd->vconn_enabled = false;
+
+	if (pd->vbus_enabled)
+		regulator_disable(pd->vbus);
+	pd->vbus_enabled = false;
+
+	if (pd->current_dr != DR_DFP) {
+		extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
+		pd->current_dr = DR_DFP;
+		pd_phy_update_roles(pd->current_dr, pd->current_pr);
+	}
+
+	/* PE_UNKNOWN will turn on VBUS and go back to PE_SRC_STARTUP */
+	pd->current_state = PE_UNKNOWN;
+	kick_sm(pd, SRC_RECOVER_TIME);
+}
+
+static void enter_state_snk_startup(struct usbpd *pd)
+{
+	struct pd_phy_params phy_params = {
+		.signal_cb		= phy_sig_received,
+		.msg_rx_cb		= phy_msg_received,
+		.shutdown_cb		= phy_shutdown,
+		.frame_filter_val	= FRAME_FILTER_EN_SOP |
+					  FRAME_FILTER_EN_HARD_RESET,
+	};
+	union power_supply_propval val = {0};
+
+	if (pd->current_dr == DR_NONE || pd->current_dr == DR_UFP)
+		pd->current_dr = DR_UFP;
+
+	dual_role_instance_changed(pd->dual_role);
+
+	/*
+	 * support up to PD 3.0 as a sink; if source is 2.0
+	 * phy_msg_received() will handle the downgrade.
+	 */
+	pd->spec_rev = USBPD_REV_30;
+
+	if (usbpd_startup_common(pd, &phy_params))
+		return;
+
+	pd->current_voltage = pd->requested_voltage = 5000000;
+	val.intval = pd->requested_voltage; /* set max range to 5V */
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);
+
+	if (!pd->vbus_present) {
+		pd->current_state = PE_SNK_DISCOVERY;
+		/* max time for hard reset to turn vbus back on */
+		kick_sm(pd, SNK_HARD_RESET_VBUS_ON_TIME);
+		return;
+	}
+
+	usbpd_set_state(pd, PE_SNK_WAIT_FOR_CAPABILITIES);
+}
+
+static void handle_state_snk_startup(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	usbpd_set_state(pd, PE_SNK_STARTUP);
+}
+
+static void handle_state_snk_discovery(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	if (!rx_msg) {
+		if (pd->vbus_present)
+			usbpd_set_state(pd,
+					PE_SNK_WAIT_FOR_CAPABILITIES);
+
+		/*
+		 * Handle disconnection in the middle of PR_Swap.
+		 * psy_changed() ignores the typec_mode==NONE change
+		 * while pd->in_pr_swap is true, since that is expected
+		 * during the swap. If the cable really was removed, we
+		 * catch it here once waiting for VBUS presence times
+		 * out.
+		 */
+		if (!pd->typec_mode) {
+			pd->current_pr = PR_NONE;
+			kick_sm(pd, 0);
+		}
+
+		return;
+	}
+
+	handle_state_snk_wait_for_capabilities(pd, rx_msg);
+}
+
+static void enter_state_snk_wait_for_capabilities(struct usbpd *pd)
+{
+	unsigned long flags;
+
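+	/* arm the tSinkWaitCap timer only if no message is already queued */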
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	if (list_empty(&pd->rx_q))
+		kick_sm(pd, SINK_WAIT_CAP_TIME);
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+}
+
+static void handle_state_snk_wait_for_capabilities(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	union power_supply_propval val = {0};
+
+	pd->in_pr_swap = false;
+	val.intval = 0;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PR_SWAP, &val);
+
+	if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+				&val);
+
+		/* save the PDOs so userspace can further evaluate */
+		memset(&pd->received_pdos, 0,
+				sizeof(pd->received_pdos));
+		memcpy(&pd->received_pdos, rx_msg->payload,
+				min_t(size_t, rx_msg->data_len,
+					sizeof(pd->received_pdos)));
+		pd->src_cap_id++;
+
+		usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
+	} else if (pd->hard_reset_count < 3) {
+		usbpd_set_state(pd, PE_SNK_HARD_RESET);
+	} else {
+		usbpd_dbg(&pd->dev, "Sink hard reset count exceeded, disabling PD\n");
+
+		val.intval = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_IN_HARD_RESET,
+				&val);
+
+		val.intval = POWER_SUPPLY_PD_INACTIVE;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+	}
+}
+
+static void enter_state_snk_evaluate_capability(struct usbpd *pd)
+{
+	int ret;
+
+	pd->hard_reset_count = 0;
+
+	/* evaluate PDOs and select one */
+	ret = pd_eval_src_caps(pd);
+	if (ret < 0) {
+		usbpd_err(&pd->dev, "Invalid src_caps received. Skipping request\n");
+		return;
+	}
+
+	pd->pd_connected = true; /* we know peer is PD capable */
+	usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
+}
+
+static void enter_state_snk_select_capability(struct usbpd *pd)
+{
+	int ret;
+
+	log_decoded_request(pd, pd->rdo);
+
+	ret = pd_send_msg(pd, MSG_REQUEST, &pd->rdo, 1, SOP_MSG);
+	if (ret) {
+		usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+		return;
+	}
+
+	/* wait for ACCEPT */
+	kick_sm(pd, SENDER_RESPONSE_TIME);
+}
+
+static void handle_state_snk_select_capability(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	int ret;
+	union power_supply_propval val = {0};
+
+	if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+		u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
+		bool same_pps = (pd->selected_pdo == pd->requested_pdo)
+			&& (PD_SRC_PDO_TYPE(pdo) ==
+					PD_SRC_PDO_TYPE_AUGMENTED);
+
+		usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
+
+		/* prepare for voltage increase/decrease */
+		val.intval = pd->requested_voltage;
+		power_supply_set_property(pd->usb_psy,
+			pd->requested_voltage >= pd->current_voltage ?
+				POWER_SUPPLY_PROP_PD_VOLTAGE_MAX :
+				POWER_SUPPLY_PROP_PD_VOLTAGE_MIN,
+				&val);
+
+		/*
+		 * if changing voltages (not within the same PPS PDO),
+		 * we must lower input current to pSnkStdby (2.5W).
+		 * Calculate it and set PD_CURRENT_MAX accordingly.
+		 */
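+		/* e.g. at 9V: (2500000 / 9000) * 1000 = 277000uA (~277mA) */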
+		if (!same_pps &&
+			pd->requested_voltage != pd->current_voltage) {
+			int mv = max(pd->requested_voltage,
+					pd->current_voltage) / 1000;
+			val.intval = (2500000 / mv) * 1000;
+			power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+		} else {
+			/* decreasing current? */
+			ret = power_supply_get_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+			if (!ret &&
+				pd->requested_current < val.intval) {
+				val.intval =
+					pd->requested_current * 1000;
+				power_supply_set_property(pd->usb_psy,
+				     POWER_SUPPLY_PROP_PD_CURRENT_MAX,
+				     &val);
+			}
+		}
+
+		pd->selected_pdo = pd->requested_pdo;
+	} else if (IS_CTRL(rx_msg, MSG_REJECT) ||
+			IS_CTRL(rx_msg, MSG_WAIT)) {
+		if (pd->in_explicit_contract)
+			usbpd_set_state(pd, PE_SNK_READY);
+		else
+			usbpd_set_state(pd,
+					PE_SNK_WAIT_FOR_CAPABILITIES);
+	} else if (rx_msg) {
+		usbpd_err(&pd->dev, "Invalid response to sink request\n");
+		usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+	} else {
+		/* timed out; go to hard reset */
+		usbpd_set_state(pd, PE_SNK_HARD_RESET);
+	}
+}
+
+static void enter_state_snk_transition_sink(struct usbpd *pd)
+{
+	/* wait for PS_RDY */
+	kick_sm(pd, PS_TRANSITION_TIME);
+}
+
+static void handle_state_snk_transition_sink(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	union power_supply_propval val = {0};
+
+	if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
+		val.intval = pd->requested_voltage;
+		power_supply_set_property(pd->usb_psy,
+			pd->requested_voltage >= pd->current_voltage ?
+				POWER_SUPPLY_PROP_PD_VOLTAGE_MIN :
+				POWER_SUPPLY_PROP_PD_VOLTAGE_MAX, &val);
+		pd->current_voltage = pd->requested_voltage;
+
+		/* resume charging */
+		val.intval = pd->requested_current * 1000; /* mA->uA */
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+
+		usbpd_set_state(pd, PE_SNK_READY);
+	} else {
+		/* timed out; go to hard reset */
+		usbpd_set_state(pd, PE_SNK_HARD_RESET);
+	}
+}
+
+static void enter_state_snk_ready(struct usbpd *pd)
+{
+	pd->in_explicit_contract = true;
+
+	if (pd->vdm_tx)
+		kick_sm(pd, 0);
+	else if (pd->current_dr == DR_DFP && pd->vdm_state == VDM_NONE)
+		usbpd_send_svdm(pd, USBPD_SID,
+				USBPD_SVDM_DISCOVER_IDENTITY,
+				SVDM_CMD_TYPE_INITIATOR, 0, NULL, 0);
+
+	kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+	complete(&pd->is_ready);
+	dual_role_instance_changed(pd->dual_role);
+}
+
+static void handle_ctrl_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	int ret;
+
+	if (IS_CTRL(rx_msg, MSG_GET_SINK_CAP)) {
+		ret = pd_send_msg(pd, MSG_SINK_CAPABILITIES,
+				pd->sink_caps, pd->num_sink_caps,
+				SOP_MSG);
+		if (ret)
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+	} else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP) &&
+			pd->spec_rev == USBPD_REV_20) {
+		ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
+				default_src_caps,
+				ARRAY_SIZE(default_src_caps), SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+	} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
+		if (pd->vdm_state == MODE_ENTERED) {
+			usbpd_set_state(pd, PE_SNK_HARD_RESET);
+			return;
+		}
+
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		dr_swap(pd);
+	} else if (IS_CTRL(rx_msg, MSG_PR_SWAP) &&
+			pd->spec_rev == USBPD_REV_20) {
+		/* TODO: should we Reject in certain circumstances? */
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
+		return;
+	} else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP) &&
+			pd->spec_rev == USBPD_REV_20) {
+		/*
+		 * if VCONN is sourced from VBUS, reject the swap while
+		 * in a high-voltage contract
+		 */
+		if (!pd->vconn_is_external &&
+				(pd->requested_voltage > 5000000)) {
+			ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
+					SOP_MSG);
+			if (ret)
+				usbpd_set_state(pd,
+						PE_SEND_SOFT_RESET);
+
+			return;
+		}
+
+		ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+		vconn_swap(pd);
+	}
+}
+
+static void handle_data_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	if (IS_DATA(rx_msg, MSG_SOURCE_CAPABILITIES)) {
+		/* save the PDOs so userspace can further evaluate */
+		memset(&pd->received_pdos, 0,
+				sizeof(pd->received_pdos));
+		memcpy(&pd->received_pdos, rx_msg->payload,
+				min_t(size_t, rx_msg->data_len,
+					sizeof(pd->received_pdos)));
+		pd->src_cap_id++;
+
+		usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
+	} else if (IS_DATA(rx_msg, MSG_VDM)) {
+		handle_vdm_rx(pd, rx_msg);
+	} else if (IS_DATA(rx_msg, MSG_ALERT)) {
+		u32 ado;
+
+		if (rx_msg->data_len != sizeof(ado)) {
+			usbpd_err(&pd->dev, "Invalid ado\n");
+			return;
+		}
+		memcpy(&ado, rx_msg->payload, sizeof(ado));
+		usbpd_dbg(&pd->dev, "Received Alert 0x%08x\n", ado);
+
+		/*
+		 * Don't send Get_Status right away so we can coalesce
+		 * multiple Alerts. 150ms should be enough to not get
+		 * in the way of any other AMS that might happen.
+		 */
+		pd->send_get_status = true;
+		kick_sm(pd, 150);
+	}
+}
+
+static void handle_ext_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	if (IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) {
+		if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) {
+			usbpd_err(&pd->dev, "Invalid src cap ext db\n");
+			return;
+		}
+		memcpy(&pd->src_cap_ext_db, rx_msg->payload,
+			sizeof(pd->src_cap_ext_db));
+		complete(&pd->is_ready);
+	} else if (IS_EXT(rx_msg, MSG_PPS_STATUS)) {
+		if (rx_msg->data_len != sizeof(pd->pps_status_db)) {
+			usbpd_err(&pd->dev, "Invalid pps status db\n");
+			return;
+		}
+		memcpy(&pd->pps_status_db, rx_msg->payload,
+			sizeof(pd->pps_status_db));
+		complete(&pd->is_ready);
+	} else if (IS_EXT(rx_msg, MSG_STATUS)) {
+		if (rx_msg->data_len != PD_STATUS_DB_LEN) {
+			usbpd_err(&pd->dev, "Invalid status db\n");
+			return;
+		}
+		memcpy(&pd->status_db, rx_msg->payload,
+			sizeof(pd->status_db));
+		kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+	} else if (IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) {
+		if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) {
+			usbpd_err(&pd->dev, "Invalid battery cap db\n");
+			return;
+		}
+		memcpy(&pd->battery_cap_db, rx_msg->payload,
+			sizeof(pd->battery_cap_db));
+		complete(&pd->is_ready);
+	} else if (IS_EXT(rx_msg, MSG_BATTERY_STATUS)) {
+		if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) {
+			usbpd_err(&pd->dev, "Invalid bat sts dobj\n");
+			return;
+		}
+		memcpy(&pd->battery_sts_dobj, rx_msg->payload,
+			sizeof(pd->battery_sts_dobj));
+		complete(&pd->is_ready);
+	}
+}
+
+static void handle_snk_ready_tx(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	int ret;
+
+	if (pd->send_get_src_cap_ext) {
+		pd->send_get_src_cap_ext = false;
+		ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL,
+			0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else if (pd->send_get_pps_status) {
+		pd->send_get_pps_status = false;
+		ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL,
+			0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else if (pd->send_get_status) {
+		pd->send_get_status = false;
+		ret = pd_send_msg(pd, MSG_GET_STATUS, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else if (pd->send_get_battery_cap) {
+		pd->send_get_battery_cap = false;
+		ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP,
+			&pd->get_battery_cap_db, 1, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else if (pd->send_get_battery_status) {
+		pd->send_get_battery_status = false;
+		ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS,
+			&pd->get_battery_status_db, 1, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else if (pd->send_request) {
+		pd->send_request = false;
+		usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
+	} else if (pd->send_pr_swap) {
+		pd->send_pr_swap = false;
+		ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		pd->current_state = PE_PRS_SNK_SRC_SEND_SWAP;
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else if (pd->send_dr_swap) {
+		pd->send_dr_swap = false;
+		ret = pd_send_msg(pd, MSG_DR_SWAP, NULL, 0, SOP_MSG);
+		if (ret) {
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+			return;
+		}
+
+		pd->current_state = PE_DRS_SEND_DR_SWAP;
+		kick_sm(pd, SENDER_RESPONSE_TIME);
+	} else {
+		handle_vdm_tx(pd, SOP_MSG);
+	}
+}
+
+static void handle_state_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
+{
+	int ret;
+
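+	/* dispatch on the header: control (no data objects), data, extended */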
+	if (rx_msg && !PD_MSG_HDR_COUNT(rx_msg->hdr)) {
+		handle_ctrl_snk_ready(pd, rx_msg);
+		return;
+	} else if (rx_msg && !PD_MSG_HDR_IS_EXTENDED(rx_msg->hdr)) {
+		handle_data_snk_ready(pd, rx_msg);
+		return;
+	} else if (rx_msg && PD_MSG_HDR_IS_EXTENDED(rx_msg->hdr)) {
+		handle_ext_snk_ready(pd, rx_msg);
+	} else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+		/* unhandled messages */
+		ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+				SOP_MSG);
+		if (ret)
+			usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+		return;
+	}
+
+	if (is_sink_tx_ok(pd))
+		handle_snk_ready_tx(pd, rx_msg);
+}
+
+static void handle_state_snk_hard_reset(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	union power_supply_propval val = {0};
+
+	/* prepare charger for VBUS change */
+	val.intval = 1;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+	pd->requested_voltage = 5000000;
+
+	if (pd->requested_current) {
+		val.intval = pd->requested_current = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+	}
+
+	val.intval = pd->requested_voltage;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);
+
+	pd_send_hard_reset(pd);
+	pd->in_explicit_contract = false;
+	pd->selected_pdo = pd->requested_pdo = 0;
+	pd->rdo = 0;
+	reset_vdm_state(pd);
+	kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+	usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
+}
+
+static void enter_state_snk_transition_to_default(struct usbpd *pd)
+{
+	if (pd->current_dr != DR_UFP) {
+		stop_usb_host(pd);
+		start_usb_peripheral(pd);
+		pd->current_dr = DR_UFP;
+		pd_phy_update_roles(pd->current_dr, pd->current_pr);
+	}
+	if (pd->vconn_enabled) {
+		regulator_disable(pd->vconn);
+		pd->vconn_enabled = false;
+	}
+
+	/* max time for hard reset to turn vbus off */
+	kick_sm(pd, SNK_HARD_RESET_VBUS_OFF_TIME);
+}
+
+static void handle_state_snk_transition_to_default(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	usbpd_set_state(pd, PE_SNK_STARTUP);
+}
+
+static void handle_state_drs_send_dr_swap(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (IS_CTRL(rx_msg, MSG_ACCEPT))
+		dr_swap(pd);
+
+	usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+			PE_SRC_READY : PE_SNK_READY);
+}
+
+static void handle_state_prs_snk_src_send_swap(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
+		pd->current_state = PE_SNK_READY;
+		return;
+	}
+
+	usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
+}
+
+static void enter_state_prs_snk_src_transition_to_off(struct usbpd *pd)
+{
+	union power_supply_propval val = {0};
+
+	val.intval = pd->in_pr_swap = true;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PR_SWAP, &val);
+
+	/* lock in current mode */
+	set_power_role(pd, pd->current_pr);
+
+	val.intval = pd->requested_current = 0; /* suspend charging */
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+
+	pd->in_explicit_contract = false;
+
+	/*
+	 * need to update PR bit in message header so that
+	 * proper GoodCRC is sent when receiving next PS_RDY
+	 */
+	pd_phy_update_roles(pd->current_dr, PR_SRC);
+
+	/* wait for PS_RDY */
+	kick_sm(pd, PS_SOURCE_OFF);
+}
+
+static void handle_state_prs_snk_src_transition_to_off(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (!IS_CTRL(rx_msg, MSG_PS_RDY)) {
+		usbpd_set_state(pd, PE_ERROR_RECOVERY);
+		return;
+	}
+
+	/* PE_PRS_SNK_SRC_Assert_Rp */
+	pd->current_pr = PR_SRC;
+	set_power_role(pd, pd->current_pr);
+	pd->current_state = PE_PRS_SNK_SRC_SOURCE_ON;
+
+	handle_state_prs_snk_src_source_on(pd, rx_msg);
+}
+
+static void handle_state_prs_snk_src_source_on(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	int ret;
+
+	enable_vbus(pd);
+
+	ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+	if (ret) {
+		usbpd_set_state(pd, PE_ERROR_RECOVERY);
+		return;
+	}
+
+	usbpd_set_state(pd, PE_SRC_STARTUP);
+}
+
+static void handle_state_prs_src_snk_send_swap(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (!IS_CTRL(rx_msg, MSG_ACCEPT)) {
+		pd->current_state = PE_SRC_READY;
+		return;
+	}
+
+	usbpd_set_state(pd, PE_PRS_SRC_SNK_TRANSITION_TO_OFF);
+}
+
+static void enter_state_prs_src_snk_transition_to_off(struct usbpd *pd)
+{
+	union power_supply_propval val = {0};
+
+	val.intval = pd->in_pr_swap = true;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PR_SWAP, &val);
+	pd->in_explicit_contract = false;
+
+	/* lock in current mode */
+	set_power_role(pd, pd->current_pr);
+
+	kick_sm(pd, SRC_TRANSITION_TIME);
+}
+
+static void handle_state_prs_src_snk_transition_to_off(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	int ret;
+
+	if (pd->vbus_enabled) {
+		regulator_disable(pd->vbus);
+		pd->vbus_enabled = false;
+	}
+
+	/* PE_PRS_SRC_SNK_Assert_Rd */
+	pd->current_pr = PR_SINK;
+	set_power_role(pd, pd->current_pr);
+	pd_phy_update_roles(pd->current_dr, pd->current_pr);
+
+	/* allow time for Vbus discharge, must be < tSrcSwapStdby */
+	msleep(500);
+
+	ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
+	if (ret) {
+		usbpd_set_state(pd, PE_ERROR_RECOVERY);
+		return;
+	}
+
+	pd->current_state = PE_PRS_SRC_SNK_WAIT_SOURCE_ON;
+	kick_sm(pd, PS_SOURCE_ON);
+}
+
+static void handle_state_prs_src_snk_wait_source_on(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (IS_CTRL(rx_msg, MSG_PS_RDY))
+		usbpd_set_state(pd, PE_SNK_STARTUP);
+	else
+		usbpd_set_state(pd, PE_ERROR_RECOVERY);
+}
+
+static void enter_state_send_soft_reset(struct usbpd *pd)
+{
+	int ret;
+
+	pd_reset_protocol(pd);
+
+	ret = pd_send_msg(pd, MSG_SOFT_RESET, NULL, 0, SOP_MSG);
+	if (ret) {
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+		return;
+	}
+
+	/* wait for ACCEPT */
+	kick_sm(pd, SENDER_RESPONSE_TIME);
+}
+
+static void handle_state_send_soft_reset(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_SEND_CAPABILITIES :
+				PE_SNK_WAIT_FOR_CAPABILITIES);
+	} else {
+		usbpd_err(&pd->dev, "%s: Did not see Accept, do Hard Reset\n",
+				usbpd_state_strings[pd->current_state]);
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+	}
+}
+
+static void handle_state_vcs_wait_for_vconn(struct usbpd *pd,
+	struct rx_msg *rx_msg)
+{
+	if (IS_CTRL(rx_msg, MSG_PS_RDY)) {
+		/*
+		 * should be a redundant check, but verify vconn_enabled
+		 * to avoid an unbalanced regulator disable count
+		 */
+		if (pd->vconn_enabled)
+			regulator_disable(pd->vconn);
+		pd->vconn_enabled = false;
+
+		pd->current_state = pd->current_pr == PR_SRC ?
+			PE_SRC_READY : PE_SNK_READY;
+	} else {
+		/* timed out; go to hard reset */
+		usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+				PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+	}
+}
+
+/* Enters new state and executes actions on entry */
+static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
+{
+	if (pd->hard_reset_recvd) /* let usbpd_sm handle it */
+		return;
+
+	usbpd_dbg(&pd->dev, "%s -> %s\n",
+			usbpd_state_strings[pd->current_state],
+			usbpd_state_strings[next_state]);
+
+	pd->current_state = next_state;
+
+	if (pd->current_state < PE_MAX_STATES &&
+		state_handlers[pd->current_state].enter_state)
+		state_handlers[pd->current_state].enter_state(pd);
+	else
+		usbpd_dbg(&pd->dev, "No action for state %s\n",
+				usbpd_state_strings[pd->current_state]);
+}
+
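+/*
+ * Policy engine state dispatch table: the optional enter_state callback runs
+ * on entry via usbpd_set_state(), and the optional handle_state callback runs
+ * from usbpd_sm() when a message arrives or a timeout fires in that state.
+ */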
+static const struct usbpd_state_handler state_handlers[] = {
+	[PE_UNKNOWN] = {NULL, handle_state_unknown},
+	[PE_ERROR_RECOVERY] = {enter_state_error_recovery, NULL},
+	[PE_SRC_DISABLED] = {enter_state_src_disabled, NULL},
+	[PE_SRC_STARTUP] = {enter_state_src_startup, handle_state_src_startup},
+	[PE_SRC_STARTUP_WAIT_FOR_VDM_RESP] = {NULL,
+				handle_state_src_startup_wait_for_vdm_resp},
+	[PE_SRC_SEND_CAPABILITIES] = {enter_state_src_send_capabilities,
+				handle_state_src_send_capabilities},
+	[PE_SRC_SEND_CAPABILITIES_WAIT] = {NULL,
+				handle_state_src_send_capabilities_wait},
+	[PE_SRC_NEGOTIATE_CAPABILITY] = {enter_state_src_negotiate_capability,
+					NULL},
+	[PE_SRC_READY] = {enter_state_src_ready, handle_state_src_ready},
+	[PE_SRC_HARD_RESET] = {enter_state_hard_reset,
+				handle_state_src_hard_reset},
+	[PE_SRC_SOFT_RESET] = {NULL, handle_state_soft_reset},
+	[PE_SRC_TRANSITION_TO_DEFAULT] = {NULL,
+				handle_state_src_transition_to_default},
+	[PE_SNK_STARTUP] = {enter_state_snk_startup,
+				handle_state_snk_startup},
+	[PE_SNK_DISCOVERY] = {NULL, handle_state_snk_discovery},
+	[PE_SNK_WAIT_FOR_CAPABILITIES] = {
+					enter_state_snk_wait_for_capabilities,
+					handle_state_snk_wait_for_capabilities},
+	[PE_SNK_EVALUATE_CAPABILITY] = {enter_state_snk_evaluate_capability,
+					NULL},
+	[PE_SNK_SELECT_CAPABILITY] = {enter_state_snk_select_capability,
+					handle_state_snk_select_capability},
+	[PE_SNK_TRANSITION_SINK] = {enter_state_snk_transition_sink,
+					handle_state_snk_transition_sink},
+	[PE_SNK_READY] = {enter_state_snk_ready, handle_state_snk_ready},
+	[PE_SNK_HARD_RESET] = {enter_state_hard_reset,
+				handle_state_snk_hard_reset},
+	[PE_SNK_SOFT_RESET] = {NULL, handle_state_soft_reset},
+	[PE_SNK_TRANSITION_TO_DEFAULT] = {
+				enter_state_snk_transition_to_default,
+				handle_state_snk_transition_to_default},
+	[PE_DRS_SEND_DR_SWAP] = {NULL, handle_state_drs_send_dr_swap},
+	[PE_PRS_SNK_SRC_SEND_SWAP] = {NULL, handle_state_prs_snk_src_send_swap},
+	[PE_PRS_SNK_SRC_TRANSITION_TO_OFF] = {
+				enter_state_prs_snk_src_transition_to_off,
+				handle_state_prs_snk_src_transition_to_off},
+	[PE_PRS_SNK_SRC_SOURCE_ON] = {NULL, handle_state_prs_snk_src_source_on},
+	[PE_PRS_SRC_SNK_SEND_SWAP] = {NULL, handle_state_prs_src_snk_send_swap},
+	[PE_PRS_SRC_SNK_TRANSITION_TO_OFF] = {
+				enter_state_prs_src_snk_transition_to_off,
+				handle_state_prs_src_snk_transition_to_off},
+	[PE_PRS_SRC_SNK_WAIT_SOURCE_ON] = {NULL,
+			handle_state_prs_src_snk_wait_source_on},
+	[PE_SEND_SOFT_RESET] = {enter_state_send_soft_reset,
+				handle_state_send_soft_reset},
+	[PE_VCS_WAIT_FOR_VCONN] = {NULL, handle_state_vcs_wait_for_vconn},
+};
+
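+/*
+ * Tears down an established connection: disables Vconn/VBUS, closes the PD
+ * PHY, clears contract and swap state, resets charger properties, and puts
+ * the CC back to DRP toggling (or a forced role, if one was requested).
+ */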
+static void handle_disconnect(struct usbpd *pd)
+{
+	union power_supply_propval val = {0};
+
+	if (pd->vconn_enabled) {
+		regulator_disable(pd->vconn);
+		pd->vconn_enabled = false;
+	}
+
+	usbpd_info(&pd->dev, "USB Type-C disconnect\n");
+
+	if (pd->pd_phy_opened) {
+		pd_phy_close();
+		pd->pd_phy_opened = false;
+	}
+
+	pd->in_pr_swap = false;
+	pd->pd_connected = false;
+	pd->in_explicit_contract = false;
+	pd->hard_reset_recvd = false;
+	pd->caps_count = 0;
+	pd->hard_reset_count = 0;
+	pd->requested_voltage = 0;
+	pd->requested_current = 0;
+	pd->selected_pdo = pd->requested_pdo = 0;
+	pd->peer_usb_comm = pd->peer_pr_swap = pd->peer_dr_swap = false;
+	memset(&pd->received_pdos, 0, sizeof(pd->received_pdos));
+	rx_msg_cleanup(pd);
+
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_IN_HARD_RESET, &val);
+
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+			&val);
+
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
+	if (pd->vbus_enabled) {
+		regulator_disable(pd->vbus);
+		pd->vbus_enabled = false;
+	}
+
+	reset_vdm_state(pd);
+	if (pd->current_dr == DR_UFP)
+		stop_usb_peripheral(pd);
+	else if (pd->current_dr == DR_DFP)
+		stop_usb_host(pd);
+
+	pd->current_dr = DR_NONE;
+
+	if (pd->current_state == PE_ERROR_RECOVERY)
+		/* forced disconnect, wait before resetting to DRP */
+		usleep_range(ERROR_RECOVERY_TIME * USEC_PER_MSEC,
+			(ERROR_RECOVERY_TIME + 5) * USEC_PER_MSEC);
+
+	val.intval = 0;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PR_SWAP, &val);
+
+	/* set due to dual_role class "mode" change */
+	if (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
+		val.intval = pd->forced_pr;
+	else
+		/* Set CC back to DRP toggle */
+		val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+	pd->forced_pr = POWER_SUPPLY_TYPEC_PR_NONE;
+
+	pd->current_state = PE_UNKNOWN;
+
+	kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+	dual_role_instance_changed(pd->dual_role);
+
+	if (pd->has_dp) {
+		pd->has_dp = false;
+
+		/* Set to USB only mode when cable disconnected */
+		extcon_blocking_sync(pd->extcon, EXTCON_DISP_DP, 0);
+	}
+}
+
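+/*
+ * Resets local policy state after a Hard Reset: drops the explicit contract,
+ * falls back to requesting 5 V as sink, and moves to the Transition_to_default
+ * state appropriate for the current power role.
+ */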
+static void handle_hard_reset(struct usbpd *pd)
+{
+	union power_supply_propval val = {0};
+
+	pd->hard_reset_recvd = false;
+
+	if (pd->requested_current) {
+		val.intval = pd->requested_current = 0;
+		power_supply_set_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_PD_CURRENT_MAX, &val);
+	}
+
+	pd->requested_voltage = 5000000;
+	val.intval = pd->requested_voltage;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PD_VOLTAGE_MIN, &val);
+
+	pd->in_pr_swap = false;
+	val.intval = 0;
+	power_supply_set_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PR_SWAP, &val);
+
+	pd->in_explicit_contract = false;
+	pd->selected_pdo = pd->requested_pdo = 0;
+	pd->rdo = 0;
+	rx_msg_cleanup(pd);
+	reset_vdm_state(pd);
+	kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+
+	if (pd->current_pr == PR_SINK) {
+		usbpd_set_state(pd, PE_SNK_TRANSITION_TO_DEFAULT);
+	} else {
+		s64 delta = ktime_ms_delta(ktime_get(),
+				pd->hard_reset_recvd_time);
+		pd->current_state = PE_SRC_TRANSITION_TO_DEFAULT;
+		if (delta >= PS_HARD_RESET_TIME)
+			kick_sm(pd, 0);
+		else
+			kick_sm(pd, PS_HARD_RESET_TIME - (int)delta);
+	}
+}
+
+/* Handles current state and determines transitions */
+static void usbpd_sm(struct work_struct *w)
+{
+	struct usbpd *pd = container_of(w, struct usbpd, sm_work);
+	int ret;
+	struct rx_msg *rx_msg = NULL;
+	unsigned long flags;
+
+	usbpd_dbg(&pd->dev, "handle state %s\n",
+			usbpd_state_strings[pd->current_state]);
+
+	hrtimer_cancel(&pd->timer);
+	pd->sm_queued = false;
+
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	if (!list_empty(&pd->rx_q)) {
+		rx_msg = list_first_entry(&pd->rx_q, struct rx_msg, entry);
+		list_del(&rx_msg->entry);
+	}
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+	/* Disconnect? */
+	if (pd->current_pr == PR_NONE) {
+		if (pd->current_state == PE_UNKNOWN)
+			goto sm_done;
+
+		handle_disconnect(pd);
+		goto sm_done;
+	}
+
+	/* Hard reset? */
+	if (pd->hard_reset_recvd) {
+		handle_hard_reset(pd);
+		goto sm_done;
+	}
+
+	/* Soft reset? */
+	if (IS_CTRL(rx_msg, MSG_SOFT_RESET)) {
+		usbpd_dbg(&pd->dev, "Handle soft reset\n");
+
+		if (pd->current_pr == PR_SRC)
+			pd->current_state = PE_SRC_SOFT_RESET;
+		else if (pd->current_pr == PR_SINK)
+			pd->current_state = PE_SNK_SOFT_RESET;
+	}
+
+	if (pd->current_state < PE_MAX_STATES &&
+			state_handlers[pd->current_state].handle_state)
+		state_handlers[pd->current_state].handle_state(pd, rx_msg);
+	else
+		usbpd_err(&pd->dev, "Unhandled state %s\n",
+				usbpd_state_strings[pd->current_state]);
+sm_done:
+	kfree(rx_msg);
+
+	spin_lock_irqsave(&pd->rx_lock, flags);
+	ret = list_empty(&pd->rx_q);
+	spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+	/* requeue if there are any new/pending RX messages */
+	if (!ret)
+		kick_sm(pd, 0);
+
+	if (!pd->sm_queued)
+		pm_relax(&pd->dev);
+}
+
+static inline const char *src_current(enum power_supply_typec_mode typec_mode)
+{
+	switch (typec_mode) {
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+		return "default";
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+		return "medium - 1.5A";
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		return "high - 3.0A";
+	default:
+		return "";
+	}
+}
+
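+/*
+ * Maps a Type-C mode change reported by the charger driver to a power role
+ * update. Returns 1 if the state machine should be kicked, 0 to ignore the
+ * event.
+ */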
+static int usbpd_process_typec_mode(struct usbpd *pd,
+	enum power_supply_typec_mode typec_mode)
+{
+	switch (typec_mode) {
+	/* Disconnect */
+	case POWER_SUPPLY_TYPEC_NONE:
+		if (pd->in_pr_swap) {
+			usbpd_dbg(&pd->dev, "Ignoring disconnect due to PR swap\n");
+			return 0;
+		}
+
+		pd->current_pr = PR_NONE;
+		break;
+
+	/* Sink states */
+	case POWER_SUPPLY_TYPEC_SOURCE_DEFAULT:
+	case POWER_SUPPLY_TYPEC_SOURCE_MEDIUM:
+	case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
+		usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
+				src_current(typec_mode));
+
+		/* if waiting for SinkTxOk to start an AMS */
+		if (pd->spec_rev == USBPD_REV_30 &&
+			typec_mode == POWER_SUPPLY_TYPEC_SOURCE_HIGH &&
+			(pd->send_pr_swap || pd->send_dr_swap || pd->vdm_tx))
+			break;
+
+		if (pd->current_pr == PR_SINK)
+			return 0;
+
+		/*
+		 * Unexpected if not in PR swap; need to force disconnect from
+		 * source so we can turn off VBUS, Vconn, PD PHY etc.
+		 */
+		if (pd->current_pr == PR_SRC) {
+			usbpd_info(&pd->dev, "Forcing disconnect from source mode\n");
+			pd->current_pr = PR_NONE;
+			break;
+		}
+
+		pd->current_pr = PR_SINK;
+		break;
+
+	/* Source states */
+	case POWER_SUPPLY_TYPEC_SINK_POWERED_CABLE:
+	case POWER_SUPPLY_TYPEC_SINK:
+		usbpd_info(&pd->dev, "Type-C Sink%s connected\n",
+				typec_mode == POWER_SUPPLY_TYPEC_SINK ?
+					"" : " (powered)");
+
+		if (pd->current_pr == PR_SRC)
+			return 0;
+
+		pd->current_pr = PR_SRC;
+		break;
+
+	case POWER_SUPPLY_TYPEC_SINK_DEBUG_ACCESSORY:
+		usbpd_info(&pd->dev, "Type-C Debug Accessory connected\n");
+		break;
+	case POWER_SUPPLY_TYPEC_SINK_AUDIO_ADAPTER:
+		usbpd_info(&pd->dev, "Type-C Analog Audio Adapter connected\n");
+		break;
+	default:
+		usbpd_warn(&pd->dev, "Unsupported typec mode:%d\n",
+				typec_mode);
+		break;
+	}
+
+	return 1; /* kick state machine */
+}
+
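+/*
+ * power_supply notifier callback: reads the current Type-C mode, PE_START
+ * and VBUS presence, and kicks the state machine on relevant CC or VBUS
+ * changes. If the policy engine should not start, USB peripheral mode may
+ * still be started directly for non-PD chargers.
+ */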
+static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
+{
+	struct usbpd *pd = container_of(nb, struct usbpd, psy_nb);
+	union power_supply_propval val;
+	enum power_supply_typec_mode typec_mode;
+	int ret;
+
+	if (ptr != pd->usb_psy || evt != PSY_EVENT_PROP_CHANGED)
+		return 0;
+
+	ret = power_supply_get_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_TYPEC_MODE, &val);
+	if (ret) {
+		usbpd_err(&pd->dev, "Unable to read USB TYPEC_MODE: %d\n", ret);
+		return ret;
+	}
+
+	typec_mode = val.intval;
+
+	ret = power_supply_get_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PE_START, &val);
+	if (ret) {
+		usbpd_err(&pd->dev, "Unable to read USB PROP_PE_START: %d\n",
+				ret);
+		return ret;
+	}
+
+	/* Don't proceed if PE_START=0; start USB directly if needed */
+	if (!val.intval && !pd->pd_connected &&
+			typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
+		ret = power_supply_get_property(pd->usb_psy,
+				POWER_SUPPLY_PROP_REAL_TYPE, &val);
+		if (ret) {
+			usbpd_err(&pd->dev, "Unable to read USB TYPE: %d\n",
+					ret);
+			return ret;
+		}
+
+		if (val.intval == POWER_SUPPLY_TYPE_USB ||
+			val.intval == POWER_SUPPLY_TYPE_USB_CDP ||
+			val.intval == POWER_SUPPLY_TYPE_USB_FLOAT) {
+			usbpd_dbg(&pd->dev, "typec mode:%d type:%d\n",
+				typec_mode, val.intval);
+			pd->typec_mode = typec_mode;
+			queue_work(pd->wq, &pd->start_periph_work);
+		}
+
+		return 0;
+	}
+
+	ret = power_supply_get_property(pd->usb_psy,
+			POWER_SUPPLY_PROP_PRESENT, &val);
+	if (ret) {
+		usbpd_err(&pd->dev, "Unable to read USB PRESENT: %d\n", ret);
+		return ret;
+	}
+
+	pd->vbus_present = val.intval;
+
+	/*
+	 * For sink hard reset, state machine needs to know when VBUS changes
+	 *   - when in PE_SNK_TRANSITION_TO_DEFAULT, notify when VBUS falls
+	 *   - when in PE_SNK_DISCOVERY, notify when VBUS rises
+	 */
+	if (typec_mode && ((!pd->vbus_present &&
+			pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT) ||
+		(pd->vbus_present && pd->current_state == PE_SNK_DISCOVERY))) {
+		usbpd_dbg(&pd->dev, "hard reset: typec mode:%d present:%d\n",
+			typec_mode, pd->vbus_present);
+		pd->typec_mode = typec_mode;
+		kick_sm(pd, 0);
+		return 0;
+	}
+
+	if (pd->typec_mode == typec_mode)
+		return 0;
+
+	pd->typec_mode = typec_mode;
+
+	usbpd_dbg(&pd->dev, "typec mode:%d present:%d orientation:%d\n",
+			typec_mode, pd->vbus_present,
+			usbpd_get_plug_orientation(pd));
+
+	ret = usbpd_process_typec_mode(pd, typec_mode);
+
+	/* queue state machine due to CC state change */
+	if (ret)
+		kick_sm(pd, 0);
+	return 0;
+}
+
+static enum dual_role_property usbpd_dr_properties[] = {
+	DUAL_ROLE_PROP_SUPPORTED_MODES,
+	DUAL_ROLE_PROP_MODE,
+	DUAL_ROLE_PROP_PR,
+	DUAL_ROLE_PROP_DR,
+};
+
+static int usbpd_dr_get_property(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop, unsigned int *val)
+{
+	struct usbpd *pd = dual_role_get_drvdata(dual_role);
+
+	if (!pd)
+		return -ENODEV;
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		/* For now associate UFP/DFP with data role only */
+		if (pd->current_dr == DR_UFP)
+			*val = DUAL_ROLE_PROP_MODE_UFP;
+		else if (pd->current_dr == DR_DFP)
+			*val = DUAL_ROLE_PROP_MODE_DFP;
+		else
+			*val = DUAL_ROLE_PROP_MODE_NONE;
+		break;
+	case DUAL_ROLE_PROP_PR:
+		if (pd->current_pr == PR_SRC)
+			*val = DUAL_ROLE_PROP_PR_SRC;
+		else if (pd->current_pr == PR_SINK)
+			*val = DUAL_ROLE_PROP_PR_SNK;
+		else
+			*val = DUAL_ROLE_PROP_PR_NONE;
+		break;
+	case DUAL_ROLE_PROP_DR:
+		if (pd->current_dr == DR_UFP)
+			*val = DUAL_ROLE_PROP_DR_DEVICE;
+		else if (pd->current_dr == DR_DFP)
+			*val = DUAL_ROLE_PROP_DR_HOST;
+		else
+			*val = DUAL_ROLE_PROP_DR_NONE;
+		break;
+	default:
+		usbpd_warn(&pd->dev, "unsupported property %d\n", prop);
+		return -ENODATA;
+	}
+
+	return 0;
+}
+
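+/*
+ * Initiates a data role swap (dr_swap=true) or power role swap and blocks
+ * until the swap completes (pd->is_ready) or timeout_ms expires. Only
+ * allowed while in the SRC/SNK Ready states.
+ */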
+static int usbpd_do_swap(struct usbpd *pd, bool dr_swap,
+	unsigned int timeout_ms)
+{
+	if (pd->current_state != PE_SRC_READY &&
+			pd->current_state != PE_SNK_READY) {
+		usbpd_err(&pd->dev, "%s_role swap not allowed: PD not in Ready state\n",
+			dr_swap ? "data" : "power");
+		return -EAGAIN;
+	}
+
+	if (pd->current_state == PE_SNK_READY &&
+			!is_sink_tx_ok(pd)) {
+		usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
+		return -EAGAIN;
+	}
+
+	mutex_lock(&pd->swap_lock);
+	reinit_completion(&pd->is_ready);
+	if (dr_swap)
+		pd->send_dr_swap = true;
+	else
+		pd->send_pr_swap = true;
+	kick_sm(pd, 0);
+
+	/* wait for operation to complete */
+	if (!wait_for_completion_timeout(&pd->is_ready,
+			msecs_to_jiffies(timeout_ms))) {
+		usbpd_err(&pd->dev, "%s_role swap timed out\n",
+			dr_swap ? "data" : "power");
+		mutex_unlock(&pd->swap_lock);
+		return -ETIMEDOUT;
+	}
+
+	mutex_unlock(&pd->swap_lock);
+
+	return 0;
+}
+
+static int usbpd_dr_set_property(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop, const unsigned int *val)
+{
+	struct usbpd *pd = dual_role_get_drvdata(dual_role);
+	bool do_swap = false;
+	int ret;
+
+	if (!pd)
+		return -ENODEV;
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		usbpd_dbg(&pd->dev, "Setting mode to %d\n", *val);
+
+		/*
+		 * Forces disconnect on CC and re-establishes connection.
+		 * This does not use PD-based PR/DR swap
+		 */
+		if (*val == DUAL_ROLE_PROP_MODE_UFP)
+			pd->forced_pr = POWER_SUPPLY_TYPEC_PR_SINK;
+		else if (*val == DUAL_ROLE_PROP_MODE_DFP)
+			pd->forced_pr = POWER_SUPPLY_TYPEC_PR_SOURCE;
+
+		/* new mode will be applied in disconnect handler */
+		set_power_role(pd, PR_NONE);
+
+		/* wait until it takes effect */
+		while (pd->forced_pr != POWER_SUPPLY_TYPEC_PR_NONE)
+			msleep(20);
+
+		break;
+
+	case DUAL_ROLE_PROP_DR:
+		usbpd_dbg(&pd->dev, "Setting data_role to %d\n", *val);
+
+		if (*val == DUAL_ROLE_PROP_DR_HOST) {
+			if (pd->current_dr == DR_UFP)
+				do_swap = true;
+		} else if (*val == DUAL_ROLE_PROP_DR_DEVICE) {
+			if (pd->current_dr == DR_DFP)
+				do_swap = true;
+		} else {
+			usbpd_warn(&pd->dev, "setting data_role to 'none' unsupported\n");
+			return -ENOTSUPP;
+		}
+
+		if (do_swap) {
+			ret = usbpd_do_swap(pd, true, 100);
+			if (ret)
+				return ret;
+
+			if ((*val == DUAL_ROLE_PROP_DR_HOST &&
+					pd->current_dr != DR_DFP) ||
+				(*val == DUAL_ROLE_PROP_DR_DEVICE &&
+					 pd->current_dr != DR_UFP)) {
+				usbpd_err(&pd->dev, "incorrect state (%s) after data_role swap\n",
+						pd->current_dr == DR_DFP ?
+						"dfp" : "ufp");
+				return -EPROTO;
+			}
+		}
+
+		break;
+
+	case DUAL_ROLE_PROP_PR:
+		usbpd_dbg(&pd->dev, "Setting power_role to %d\n", *val);
+
+		if (*val == DUAL_ROLE_PROP_PR_SRC) {
+			if (pd->current_pr == PR_SINK)
+				do_swap = true;
+		} else if (*val == DUAL_ROLE_PROP_PR_SNK) {
+			if (pd->current_pr == PR_SRC)
+				do_swap = true;
+		} else {
+			usbpd_warn(&pd->dev, "setting power_role to 'none' unsupported\n");
+			return -ENOTSUPP;
+		}
+
+		if (do_swap) {
+			ret = usbpd_do_swap(pd, false, 2000);
+			if (ret)
+				return ret;
+
+			if ((*val == DUAL_ROLE_PROP_PR_SRC &&
+					pd->current_pr != PR_SRC) ||
+				(*val == DUAL_ROLE_PROP_PR_SNK &&
+					 pd->current_pr != PR_SINK)) {
+				usbpd_err(&pd->dev, "incorrect state (%s) after power_role swap\n",
+						pd->current_pr == PR_SRC ?
+						"source" : "sink");
+				return -EPROTO;
+			}
+		}
+		break;
+
+	default:
+		usbpd_warn(&pd->dev, "unsupported property %d\n", prop);
+		return -ENOTSUPP;
+	}
+
+	return 0;
+}
+
+static int usbpd_dr_prop_writeable(struct dual_role_phy_instance *dual_role,
+		enum dual_role_property prop)
+{
+	struct usbpd *pd = dual_role_get_drvdata(dual_role);
+
+	switch (prop) {
+	case DUAL_ROLE_PROP_MODE:
+		return 1;
+	case DUAL_ROLE_PROP_DR:
+	case DUAL_ROLE_PROP_PR:
+		if (pd)
+			return pd->current_state == PE_SNK_READY ||
+				pd->current_state == PE_SRC_READY;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+
+	add_uevent_var(env, "DATA_ROLE=%s", pd->current_dr == DR_DFP ?
+			"dfp" : "ufp");
+
+	if (pd->current_pr == PR_SINK) {
+		add_uevent_var(env, "POWER_ROLE=sink");
+		add_uevent_var(env, "SRC_CAP_ID=%d", pd->src_cap_id);
+
+		for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++)
+			add_uevent_var(env, "PDO%d=%08x", i,
+					pd->received_pdos[i]);
+
+		add_uevent_var(env, "REQUESTED_PDO=%d", pd->requested_pdo);
+		add_uevent_var(env, "SELECTED_PDO=%d", pd->selected_pdo);
+	} else {
+		add_uevent_var(env, "POWER_ROLE=source");
+		for (i = 0; i < ARRAY_SIZE(default_src_caps); i++)
+			add_uevent_var(env, "PDO%d=%08x", i,
+					default_src_caps[i]);
+	}
+
+	add_uevent_var(env, "RDO=%08x", pd->rdo);
+	add_uevent_var(env, "CONTRACT=%s", pd->in_explicit_contract ?
+				"explicit" : "implicit");
+	add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
+
+	add_uevent_var(env, "SDB=%02x %02x %02x %02x %02x", pd->status_db[0],
+			pd->status_db[1], pd->status_db[2], pd->status_db[3],
+			pd->status_db[4]);
+
+	return 0;
+}
+
+static ssize_t contract_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			pd->in_explicit_contract ?  "explicit" : "implicit");
+}
+static DEVICE_ATTR_RO(contract);
+
+static ssize_t current_pr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *pr = "none";
+
+	if (pd->current_pr == PR_SINK)
+		pr = "sink";
+	else if (pd->current_pr == PR_SRC)
+		pr = "source";
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", pr);
+}
+static DEVICE_ATTR_RO(current_pr);
+
+static ssize_t initial_pr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *pr = "none";
+
+	if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		pr = "sink";
+	else if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SINK)
+		pr = "source";
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", pr);
+}
+static DEVICE_ATTR_RO(initial_pr);
+
+static ssize_t current_dr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *dr = "none";
+
+	if (pd->current_dr == DR_UFP)
+		dr = "ufp";
+	else if (pd->current_dr == DR_DFP)
+		dr = "dfp";
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", dr);
+}
+static DEVICE_ATTR_RO(current_dr);
+
+static ssize_t initial_dr_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	const char *dr = "none";
+
+	if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SOURCE_DEFAULT)
+		dr = "ufp";
+	else if (pd->typec_mode >= POWER_SUPPLY_TYPEC_SINK)
+		dr = "dfp";
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n", dr);
+}
+static DEVICE_ATTR_RO(initial_dr);
+
+static ssize_t src_cap_id_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", pd->src_cap_id);
+}
+static DEVICE_ATTR_RO(src_cap_id);
+
+/* Dump received source PDOs in human-readable format */
+static ssize_t pdo_h_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+	ssize_t cnt = 0;
+
+	for (i = 0; i < ARRAY_SIZE(pd->received_pdos); i++) {
+		u32 pdo = pd->received_pdos[i];
+
+		if (pdo == 0)
+			break;
+
+		cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt, "PDO %d\n", i + 1);
+
+		if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_FIXED) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tFixed supply\n"
+					"\tDual-Role Power:%d\n"
+					"\tUSB Suspend Supported:%d\n"
+					"\tExternally Powered:%d\n"
+					"\tUSB Communications Capable:%d\n"
+					"\tData Role Swap:%d\n"
+					"\tPeak Current:%d\n"
+					"\tVoltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_SRC_PDO_FIXED_PR_SWAP(pdo),
+					PD_SRC_PDO_FIXED_USB_SUSP(pdo),
+					PD_SRC_PDO_FIXED_EXT_POWERED(pdo),
+					PD_SRC_PDO_FIXED_USB_COMM(pdo),
+					PD_SRC_PDO_FIXED_DR_SWAP(pdo),
+					PD_SRC_PDO_FIXED_PEAK_CURR(pdo),
+					PD_SRC_PDO_FIXED_VOLTAGE(pdo) * 50,
+					PD_SRC_PDO_FIXED_MAX_CURR(pdo) * 10);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_BATTERY) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tBattery supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Power:%d (mW)\n",
+					PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MAX(pdo) * 250);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_VARIABLE) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tVariable supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_SRC_PDO_VAR_BATT_MAX_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MIN_VOLT(pdo) * 50,
+					PD_SRC_PDO_VAR_BATT_MAX(pdo) * 10);
+		} else if (PD_SRC_PDO_TYPE(pdo) == PD_SRC_PDO_TYPE_AUGMENTED) {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"\tProgrammable Power supply\n"
+					"\tMax Voltage:%d (mV)\n"
+					"\tMin Voltage:%d (mV)\n"
+					"\tMax Current:%d (mA)\n",
+					PD_APDO_MAX_VOLT(pdo) * 100,
+					PD_APDO_MIN_VOLT(pdo) * 100,
+					PD_APDO_MAX_CURR(pdo) * 50);
+		} else {
+			cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt,
+					"Invalid PDO\n");
+		}
+
+		cnt += scnprintf(&buf[cnt], PAGE_SIZE - cnt, "\n");
+	}
+
+	return cnt;
+}
+static DEVICE_ATTR_RO(pdo_h);
+
+static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
+		char *buf);
+
+#define PDO_ATTR(n) {					\
+	.attr	= { .name = __stringify(pdo##n), .mode = 0444 },	\
+	.show	= pdo_n_show,				\
+}
+static struct device_attribute dev_attr_pdos[] = {
+	PDO_ATTR(1),
+	PDO_ATTR(2),
+	PDO_ATTR(3),
+	PDO_ATTR(4),
+	PDO_ATTR(5),
+	PDO_ATTR(6),
+	PDO_ATTR(7),
+};
+
+static ssize_t pdo_n_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dev_attr_pdos); i++)
+		if (attr == &dev_attr_pdos[i])
+			/* dump the PDO as a hex string */
+			return scnprintf(buf, PAGE_SIZE, "%08x\n",
+					pd->received_pdos[i]);
+
+	usbpd_err(&pd->dev, "Invalid PDO index\n");
+	return -EINVAL;
+}
+
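+/*
+ * Expects "<src cap id> <PDO> [<uV> <uA>]" and issues a new Request; only
+ * allowed while in an explicit sink contract with SinkTxOk asserted.
+ */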
+static ssize_t select_pdo_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int src_cap_id;
+	int pdo, uv = 0, ua = 0;
+	int ret;
+
+	mutex_lock(&pd->swap_lock);
+
+	/* Only allowed if we are already in explicit sink contract */
+	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+		usbpd_err(&pd->dev, "Cannot select new PDO yet\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = sscanf(buf, "%d %d %d %d", &src_cap_id, &pdo, &uv, &ua);
+	if (ret != 2 && ret != 4) {
+		usbpd_err(&pd->dev, "Must specify <src cap id> <PDO> [<uV> <uA>]\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (src_cap_id != pd->src_cap_id) {
+		usbpd_err(&pd->dev, "src_cap_id mismatch.  Requested:%d, current:%d\n",
+				src_cap_id, pd->src_cap_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (pdo < 1 || pdo > 7) {
+		usbpd_err(&pd->dev, "invalid PDO:%d\n", pdo);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = pd_select_pdo(pd, pdo, uv, ua);
+	if (ret)
+		goto out;
+
+	reinit_completion(&pd->is_ready);
+	pd->send_request = true;
+	kick_sm(pd, 0);
+
+	/* wait for operation to complete */
+	if (!wait_for_completion_timeout(&pd->is_ready,
+			msecs_to_jiffies(1000))) {
+		usbpd_err(&pd->dev, "request timed out\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* determine if request was accepted/rejected */
+	if (pd->selected_pdo != pd->requested_pdo ||
+			pd->current_voltage != pd->requested_voltage) {
+		usbpd_err(&pd->dev, "request rejected\n");
+		ret = -EINVAL;
+	}
+
+out:
+	pd->send_request = false;
+	mutex_unlock(&pd->swap_lock);
+	return ret ? ret : size;
+}
+
+static ssize_t select_pdo_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", pd->selected_pdo);
+}
+static DEVICE_ATTR_RW(select_pdo);
+
+static ssize_t rdo_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	/* dump the RDO as a hex string */
+	return scnprintf(buf, PAGE_SIZE, "%08x\n", pd->rdo);
+}
+static DEVICE_ATTR_RO(rdo);
+
+static ssize_t rdo_h_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	int pos = PD_RDO_OBJ_POS(pd->rdo);
+	int type = PD_SRC_PDO_TYPE(pd->received_pdos[pos - 1]);
+	int len;
+
+	len = scnprintf(buf, PAGE_SIZE, "Request Data Object\n"
+			"\tObj Pos:%d\n"
+			"\tGiveback:%d\n"
+			"\tCapability Mismatch:%d\n"
+			"\tUSB Communications Capable:%d\n"
+			"\tNo USB Suspend:%d\n",
+			PD_RDO_OBJ_POS(pd->rdo),
+			PD_RDO_GIVEBACK(pd->rdo),
+			PD_RDO_MISMATCH(pd->rdo),
+			PD_RDO_USB_COMM(pd->rdo),
+			PD_RDO_NO_USB_SUSP(pd->rdo));
+
+	switch (type) {
+	case PD_SRC_PDO_TYPE_FIXED:
+	case PD_SRC_PDO_TYPE_VARIABLE:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Fixed/Variable)\n"
+				"\tOperating Current:%d (mA)\n"
+				"\t%s Current:%d (mA)\n",
+				PD_RDO_FIXED_CURR(pd->rdo) * 10,
+				PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+				PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 10);
+		break;
+
+	case PD_SRC_PDO_TYPE_BATTERY:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Battery)\n"
+				"\tOperating Power:%d (mW)\n"
+				"\t%s Power:%d (mW)\n",
+				PD_RDO_FIXED_CURR(pd->rdo) * 250,
+				PD_RDO_GIVEBACK(pd->rdo) ? "Min" : "Max",
+				PD_RDO_FIXED_CURR_MINMAX(pd->rdo) * 250);
+		break;
+
+	case PD_SRC_PDO_TYPE_AUGMENTED:
+		len += scnprintf(buf + len, PAGE_SIZE - len,
+				"(Programmable)\n"
+				"\tOutput Voltage:%d (mV)\n"
+				"\tOperating Current:%d (mA)\n",
+				PD_RDO_PROG_VOLTAGE(pd->rdo) * 20,
+				PD_RDO_PROG_CURR(pd->rdo) * 50);
+		break;
+	}
+
+	return len;
+}
+static DEVICE_ATTR_RO(rdo_h);
+
+static ssize_t hard_reset_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	u32 val = 0;
+
+	if (kstrtou32(buf, 0, &val) || val != 1)
+		return -EINVAL;
+
+	usbpd_set_state(pd, pd->current_pr == PR_SRC ?
+			PE_SRC_HARD_RESET : PE_SNK_HARD_RESET);
+
+	return size;
+}
+static DEVICE_ATTR_WO(hard_reset);
+
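+/*
+ * Sets the given TX request flag, kicks the state machine, and blocks until
+ * the response arrives (pd->is_ready) or a 1 second timeout elapses.
+ */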
+static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag)
+{
+	int ret = 0;
+
+	/* Only allowed if we are already in explicit sink contract */
+	if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+		usbpd_err(&pd->dev, "%s: Cannot send msg\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	reinit_completion(&pd->is_ready);
+	*msg_tx_flag = true;
+	kick_sm(pd, 0);
+
+	/* wait for operation to complete */
+	if (!wait_for_completion_timeout(&pd->is_ready,
+			msecs_to_jiffies(1000))) {
+		usbpd_err(&pd->dev, "%s: request timed out\n");
+		ret = -ETIMEDOUT;
+	}
+
+out:
+	*msg_tx_flag = false;
+	return ret;
+}
+
+static ssize_t get_src_cap_ext_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int i, ret, len = 0;
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->spec_rev == USBPD_REV_20)
+		return -EINVAL;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_src_cap_ext);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s0x%02x",
+				i ? " " : "", pd->src_cap_ext_db[i]);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+	return len;
+}
+static DEVICE_ATTR_RO(get_src_cap_ext);
+
+static ssize_t get_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int i, ret, len = 0;
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->spec_rev == USBPD_REV_20)
+		return -EINVAL;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_status);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < PD_STATUS_DB_LEN; i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s0x%02x",
+				i ? " " : "", pd->status_db[i]);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+	return len;
+}
+static DEVICE_ATTR_RO(get_status);
+
+static ssize_t get_pps_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->spec_rev == USBPD_REV_20)
+		return -EINVAL;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_pps_status);
+	if (ret)
+		return ret;
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", pd->pps_status_db);
+}
+static DEVICE_ATTR_RO(get_pps_status);
+
+static ssize_t get_battery_cap_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	u32 val;
+	int ret;
+
+	if (pd->spec_rev == USBPD_REV_20 || kstrtou32(buf, 0, &val)
+			|| val != 1) {
+		pd->get_battery_cap_db = -EINVAL;
+		return -EINVAL;
+	}
+
+	pd->get_battery_cap_db = val;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_battery_cap);
+
+	return ret ? ret : size;
+}
+
+static ssize_t get_battery_cap_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int i, len = 0;
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->get_battery_cap_db == -EINVAL)
+		return -EINVAL;
+
+	for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s0x%02x",
+				i ? " " : "", pd->battery_cap_db[i]);
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+
+	return len;
+}
+static DEVICE_ATTR_RW(get_battery_cap);
+
+static ssize_t get_battery_status_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+	u32 val;
+	int ret;
+
+	if (pd->spec_rev == USBPD_REV_20 || kstrtou32(buf, 0, &val)
+			|| val != 1) {
+		pd->get_battery_status_db = -EINVAL;
+		return -EINVAL;
+	}
+
+	pd->get_battery_status_db = val;
+
+	ret = trigger_tx_msg(pd, &pd->send_get_battery_status);
+
+	return ret ? ret : size;
+}
+
+static ssize_t get_battery_status_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usbpd *pd = dev_get_drvdata(dev);
+
+	if (pd->get_battery_status_db == -EINVAL)
+		return -EINVAL;
+
+	return scnprintf(buf, PAGE_SIZE, "0x%08x\n", pd->battery_sts_dobj);
+}
+static DEVICE_ATTR_RW(get_battery_status);
+
+static struct attribute *usbpd_attrs[] = {
+	&dev_attr_contract.attr,
+	&dev_attr_initial_pr.attr,
+	&dev_attr_current_pr.attr,
+	&dev_attr_initial_dr.attr,
+	&dev_attr_current_dr.attr,
+	&dev_attr_src_cap_id.attr,
+	&dev_attr_pdo_h.attr,
+	&dev_attr_pdos[0].attr,
+	&dev_attr_pdos[1].attr,
+	&dev_attr_pdos[2].attr,
+	&dev_attr_pdos[3].attr,
+	&dev_attr_pdos[4].attr,
+	&dev_attr_pdos[5].attr,
+	&dev_attr_pdos[6].attr,
+	&dev_attr_select_pdo.attr,
+	&dev_attr_rdo.attr,
+	&dev_attr_rdo_h.attr,
+	&dev_attr_hard_reset.attr,
+	&dev_attr_get_src_cap_ext.attr,
+	&dev_attr_get_status.attr,
+	&dev_attr_get_pps_status.attr,
+	&dev_attr_get_battery_cap.attr,
+	&dev_attr_get_battery_status.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(usbpd);
+
+static struct class usbpd_class = {
+	.name = "usbpd",
+	.owner = THIS_MODULE,
+	.dev_uevent = usbpd_uevent,
+	.dev_groups = usbpd_groups,
+};
+
+static int match_usbpd_device(struct device *dev, const void *data)
+{
+	return dev->parent == data;
+}
+
+static void devm_usbpd_put(struct device *dev, void *res)
+{
+	struct usbpd **ppd = res;
+
+	put_device(&(*ppd)->dev);
+}
+
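+/**
+ * devm_usbpd_get_by_phandle - resource-managed lookup of a usbpd instance
+ * @dev: consumer device; its devicetree node must contain @phandle
+ * @phandle: name of the DT property holding a phandle to the PD device node
+ *
+ * Return: struct usbpd pointer, or an ERR_PTR value; -EPROBE_DEFER if the
+ * target device has not finished probing yet.
+ */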
+struct usbpd *devm_usbpd_get_by_phandle(struct device *dev, const char *phandle)
+{
+	struct usbpd **ptr, *pd = NULL;
+	struct device_node *pd_np;
+	struct platform_device *pdev;
+	struct device *pd_dev;
+
+	if (!usbpd_class.p) /* usbpd_init() not yet called */
+		return ERR_PTR(-EAGAIN);
+
+	if (!dev->of_node)
+		return ERR_PTR(-EINVAL);
+
+	pd_np = of_parse_phandle(dev->of_node, phandle, 0);
+	if (!pd_np)
+		return ERR_PTR(-ENXIO);
+
+	pdev = of_find_device_by_node(pd_np);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	pd_dev = class_find_device(&usbpd_class, NULL, &pdev->dev,
+			match_usbpd_device);
+	if (!pd_dev) {
+		platform_device_put(pdev);
+		/* device was found but maybe hadn't probed yet, so defer */
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	ptr = devres_alloc(devm_usbpd_put, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr) {
+		put_device(pd_dev);
+		platform_device_put(pdev);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pd = dev_get_drvdata(pd_dev);
+	if (!pd) {
+		devres_free(ptr);
+		put_device(pd_dev);
+		platform_device_put(pdev);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	*ptr = pd;
+	devres_add(dev, ptr);
+
+	return pd;
+}
+EXPORT_SYMBOL(devm_usbpd_get_by_phandle);
+
+static int num_pd_instances;
+
+/**
+ * usbpd_create - Create a new instance of USB PD protocol/policy engine
+ * @parent - parent device to associate with
+ *
+ * This creates a new usbpd class device which manages the state of a
+ * USB PD-capable port. The parent device that is passed in should be
+ * associated with the physical device port, e.g. a PD PHY.
+ *
+ * Return: struct usbpd pointer, or an ERR_PTR value
+ */
+struct usbpd *usbpd_create(struct device *parent)
+{
+	int ret;
+	struct usbpd *pd;
+
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+	if (!pd)
+		return ERR_PTR(-ENOMEM);
+
+	device_initialize(&pd->dev);
+	pd->dev.class = &usbpd_class;
+	pd->dev.parent = parent;
+	dev_set_drvdata(&pd->dev, pd);
+
+	ret = dev_set_name(&pd->dev, "usbpd%d", num_pd_instances++);
+	if (ret)
+		goto free_pd;
+
+	ret = device_init_wakeup(&pd->dev, true);
+	if (ret)
+		goto free_pd;
+
+	ret = device_add(&pd->dev);
+	if (ret)
+		goto free_pd;
+
+	pd->wq = alloc_ordered_workqueue("usbpd_wq", WQ_FREEZABLE | WQ_HIGHPRI);
+	if (!pd->wq) {
+		ret = -ENOMEM;
+		goto del_pd;
+	}
+	INIT_WORK(&pd->sm_work, usbpd_sm);
+	INIT_WORK(&pd->start_periph_work, start_usb_peripheral_work);
+	hrtimer_init(&pd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	pd->timer.function = pd_timeout;
+	mutex_init(&pd->swap_lock);
+	mutex_init(&pd->svid_handler_lock);
+
+	pd->usb_psy = power_supply_get_by_name("usb");
+	if (!pd->usb_psy) {
+		usbpd_dbg(&pd->dev, "Could not get USB power_supply, deferring probe\n");
+		ret = -EPROBE_DEFER;
+		goto destroy_wq;
+	}
+
+	/*
+	 * associate extcon with the parent dev as it could have a DT
+	 * node which will be useful for extcon_get_edev_by_phandle()
+	 */
+	pd->extcon = devm_extcon_dev_allocate(parent, usbpd_extcon_cable);
+	if (IS_ERR(pd->extcon)) {
+		usbpd_err(&pd->dev, "failed to allocate extcon device\n");
+		ret = PTR_ERR(pd->extcon);
+		goto put_psy;
+	}
+
+	ret = devm_extcon_dev_register(parent, pd->extcon);
+	if (ret) {
+		usbpd_err(&pd->dev, "failed to register extcon device\n");
+		goto put_psy;
+	}
+
+	/* Support reporting polarity and speed via properties */
+	extcon_set_property_capability(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_TYPEC_POLARITY);
+	extcon_set_property_capability(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_SS);
+	extcon_set_property_capability(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_TYPEC_MED_HIGH_CURRENT);
+	extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_TYPEC_POLARITY);
+	extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_SS);
+
+	pd->vconn_is_external = device_property_present(parent,
+					"qcom,vconn-uses-external-source");
+
+	pd->num_sink_caps = device_property_read_u32_array(parent,
+			"qcom,default-sink-caps", NULL, 0);
+	if (pd->num_sink_caps > 0) {
+		int i;
+		u32 sink_caps[14];
+
+		if (pd->num_sink_caps % 2 || pd->num_sink_caps > 14) {
+			ret = -EINVAL;
+			usbpd_err(&pd->dev, "default-sink-caps must be be specified as voltage/current, max 7 pairs\n");
+			goto put_psy;
+		}
+
+		ret = device_property_read_u32_array(parent,
+				"qcom,default-sink-caps", sink_caps,
+				pd->num_sink_caps);
+		if (ret) {
+			usbpd_err(&pd->dev, "Error reading default-sink-caps\n");
+			goto put_psy;
+		}
+
+		pd->num_sink_caps /= 2;
+
+		for (i = 0; i < pd->num_sink_caps; i++) {
+			int v = sink_caps[i * 2] / 50;
+			int c = sink_caps[i * 2 + 1] / 10;
+
+			pd->sink_caps[i] =
+				PD_SNK_PDO_FIXED(0, 0, 0, 0, 0, v, c);
+		}
+
+		/* First PDO includes additional capabilities */
+		pd->sink_caps[0] |= PD_SNK_PDO_FIXED(1, 0, 0, 1, 1, 0, 0);
+	} else {
+		memcpy(pd->sink_caps, default_snk_caps,
+				sizeof(default_snk_caps));
+		pd->num_sink_caps = ARRAY_SIZE(default_snk_caps);
+	}
+
+	/*
+	 * Register the Android dual-role class (/sys/class/dual_role_usb/).
+	 * The first instance should be named "otg_default" as that's what
+	 * Android expects.
+	 * Note this is different than the /sys/class/usbpd/ created above.
+	 */
+	pd->dr_desc.name = (num_pd_instances == 1) ?
+				"otg_default" : dev_name(&pd->dev);
+	pd->dr_desc.supported_modes = DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP;
+	pd->dr_desc.properties = usbpd_dr_properties;
+	pd->dr_desc.num_properties = ARRAY_SIZE(usbpd_dr_properties);
+	pd->dr_desc.get_property = usbpd_dr_get_property;
+	pd->dr_desc.set_property = usbpd_dr_set_property;
+	pd->dr_desc.property_is_writeable = usbpd_dr_prop_writeable;
+
+	pd->dual_role = devm_dual_role_instance_register(&pd->dev,
+			&pd->dr_desc);
+	if (IS_ERR(pd->dual_role)) {
+		usbpd_err(&pd->dev, "could not register dual_role instance\n");
+		ret = PTR_ERR(pd->dual_role);
+		goto put_psy;
+	}
+
+	pd->dual_role->drv_data = pd;
+
+	pd->current_pr = PR_NONE;
+	pd->current_dr = DR_NONE;
+	list_add_tail(&pd->instance, &_usbpd);
+
+	spin_lock_init(&pd->rx_lock);
+	INIT_LIST_HEAD(&pd->rx_q);
+	INIT_LIST_HEAD(&pd->svid_handlers);
+	init_completion(&pd->is_ready);
+	init_completion(&pd->tx_chunk_request);
+
+	pd->psy_nb.notifier_call = psy_changed;
+	ret = power_supply_reg_notifier(&pd->psy_nb);
+	if (ret)
+		goto del_inst;
+
+	/* force read initial power_supply values */
+	psy_changed(&pd->psy_nb, PSY_EVENT_PROP_CHANGED, pd->usb_psy);
+
+	return pd;
+
+del_inst:
+	list_del(&pd->instance);
+put_psy:
+	power_supply_put(pd->usb_psy);
+destroy_wq:
+	destroy_workqueue(pd->wq);
+del_pd:
+	device_del(&pd->dev);
+free_pd:
+	num_pd_instances--;
+	kfree(pd);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(usbpd_create);
+
+/**
+ * usbpd_destroy - Removes and frees a usbpd instance
+ * @pd: the instance to destroy
+ */
+void usbpd_destroy(struct usbpd *pd)
+{
+	if (!pd)
+		return;
+
+	list_del(&pd->instance);
+	power_supply_unreg_notifier(&pd->psy_nb);
+	power_supply_put(pd->usb_psy);
+	destroy_workqueue(pd->wq);
+	device_del(&pd->dev);
+	kfree(pd);
+}
+EXPORT_SYMBOL(usbpd_destroy);
+
+static int __init usbpd_init(void)
+{
+	usbpd_ipc_log = ipc_log_context_create(NUM_LOG_PAGES, "usb_pd", 0);
+	return class_register(&usbpd_class);
+}
+module_init(usbpd_init);
+
+static void __exit usbpd_exit(void)
+{
+	class_unregister(&usbpd_class);
+}
+module_exit(usbpd_exit);
+
+MODULE_DESCRIPTION("USB Power Delivery Policy Engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/pd/qpnp-pdphy.c b/drivers/usb/pd/qpnp-pdphy.c
new file mode 100644
index 0000000..f249a96
--- /dev/null
+++ b/drivers/usb/pd/qpnp-pdphy.c
@@ -0,0 +1,921 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/irq.h>
+#include <linux/of_irq.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include "usbpd.h"
+
+#define USB_PDPHY_MAX_DATA_OBJ_LEN	28
+#define USB_PDPHY_MSG_HDR_LEN		2
+
+/* PD PHY register offsets and bit fields */
+#define USB_PDPHY_MSG_CONFIG		0x40
+#define MSG_CONFIG_PORT_DATA_ROLE	BIT(3)
+#define MSG_CONFIG_PORT_POWER_ROLE	BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK	(BIT(1) | BIT(0))
+
+#define USB_PDPHY_EN_CONTROL		0x46
+#define CONTROL_ENABLE			BIT(0)
+
+#define USB_PDPHY_RX_STATUS		0x4A
+#define RX_FRAME_TYPE			(BIT(0) | BIT(1) | BIT(2))
+
+#define USB_PDPHY_FRAME_FILTER		0x4C
+#define FRAME_FILTER_EN_HARD_RESET	BIT(5)
+#define FRAME_FILTER_EN_SOP		BIT(0)
+
+#define USB_PDPHY_TX_SIZE		0x42
+#define TX_SIZE_MASK			0xF
+
+#define USB_PDPHY_TX_CONTROL		0x44
+#define TX_CONTROL_RETRY_COUNT(n)	(((n) & 0x3) << 5)
+#define TX_CONTROL_FRAME_TYPE		(BIT(4) | BIT(3) | BIT(2))
+#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
+#define TX_CONTROL_SEND_SIGNAL		BIT(1)
+#define TX_CONTROL_SEND_MSG		BIT(0)
+
+#define USB_PDPHY_RX_SIZE		0x48
+
+#define USB_PDPHY_RX_ACKNOWLEDGE	0x4B
+#define RX_BUFFER_TOKEN			BIT(0)
+
+#define USB_PDPHY_BIST_MODE		0x4E
+#define BIST_MODE_MASK			0xF
+#define BIST_ENABLE			BIT(7)
+#define PD_MSG_BIST			0x3
+#define PD_BIST_TEST_DATA_MODE		0x8
+
+#define USB_PDPHY_TX_BUFFER_HDR		0x60
+#define USB_PDPHY_TX_BUFFER_DATA	0x62
+
+#define USB_PDPHY_RX_BUFFER		0x80
+
+/* VDD regulator */
+#define VDD_PDPHY_VOL_MIN		2800000 /* uV */
+#define VDD_PDPHY_VOL_MAX		3300000 /* uV */
+#define VDD_PDPHY_HPM_LOAD		3000 /* uA */
+
+/* Message Spec Rev field */
+#define PD_MSG_HDR_REV(hdr)		(((hdr) >> 6) & 3)
+
+/* timers */
+#define RECEIVER_RESPONSE_TIME		15	/* tReceiverResponse */
+#define HARD_RESET_COMPLETE_TIME	5	/* tHardResetComplete */
+
+struct usb_pdphy {
+	struct device *dev;
+	struct regmap *regmap;
+
+	u16 base;
+	struct regulator *vdd_pdphy;
+
+	/* irqs */
+	int sig_tx_irq;
+	int sig_rx_irq;
+	int msg_tx_irq;
+	int msg_rx_irq;
+	int msg_tx_failed_irq;
+	int msg_tx_discarded_irq;
+	int msg_rx_discarded_irq;
+	bool sig_rx_wake_enabled;
+	bool msg_rx_wake_enabled;
+
+	void (*signal_cb)(struct usbpd *pd, enum pd_sig_type sig);
+	void (*msg_rx_cb)(struct usbpd *pd, enum pd_sop_type sop,
+			  u8 *buf, size_t len);
+	void (*shutdown_cb)(struct usbpd *pd);
+
+	/* write waitq */
+	wait_queue_head_t tx_waitq;
+
+	bool is_opened;
+	int tx_status;
+	u8 frame_filter_val;
+	bool in_test_data_mode;
+
+	enum data_role data_role;
+	enum power_role power_role;
+
+	struct usbpd *usbpd;
+
+	/* debug */
+	struct dentry *debug_root;
+	unsigned int tx_bytes; /* hdr + data */
+	unsigned int rx_bytes; /* hdr + data */
+	unsigned int sig_tx_cnt;
+	unsigned int sig_rx_cnt;
+	unsigned int msg_tx_cnt;
+	unsigned int msg_rx_cnt;
+	unsigned int msg_tx_failed_cnt;
+	unsigned int msg_tx_discarded_cnt;
+	unsigned int msg_rx_discarded_cnt;
+};
+
+static struct usb_pdphy *__pdphy;
+
+static int pdphy_dbg_status(struct seq_file *s, void *p)
+{
+	struct usb_pdphy *pdphy = s->private;
+
+	seq_puts(s,
+		"PD Phy driver status\n"
+		"==================================================\n");
+	seq_printf(s, "opened:         %10d\n", pdphy->is_opened);
+	seq_printf(s, "tx status:      %10d\n", pdphy->tx_status);
+	seq_printf(s, "tx bytes:       %10u\n", pdphy->tx_bytes);
+	seq_printf(s, "rx bytes:       %10u\n", pdphy->rx_bytes);
+	seq_printf(s, "data role:      %10u\n", pdphy->data_role);
+	seq_printf(s, "power role:     %10u\n", pdphy->power_role);
+	seq_printf(s, "frame filter:   %10u\n", pdphy->frame_filter_val);
+	seq_printf(s, "sig tx cnt:     %10u\n", pdphy->sig_tx_cnt);
+	seq_printf(s, "sig rx cnt:     %10u\n", pdphy->sig_rx_cnt);
+	seq_printf(s, "msg tx cnt:     %10u\n", pdphy->msg_tx_cnt);
+	seq_printf(s, "msg rx cnt:     %10u\n", pdphy->msg_rx_cnt);
+	seq_printf(s, "msg tx failed cnt:    %10u\n",
+			pdphy->msg_tx_failed_cnt);
+	seq_printf(s, "msg tx discarded cnt: %10u\n",
+			pdphy->msg_tx_discarded_cnt);
+	seq_printf(s, "msg rx discarded cnt: %10u\n",
+			pdphy->msg_rx_discarded_cnt);
+
+	return 0;
+}
+
+static int pdphy_dbg_status_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pdphy_dbg_status, inode->i_private);
+}
+
+static const struct file_operations status_ops = {
+	.owner		= THIS_MODULE,
+	.open		= pdphy_dbg_status_open,
+	.llseek		= seq_lseek,
+	.read		= seq_read,
+	.release	= single_release,
+};
+
+static void pdphy_create_debugfs_entries(struct usb_pdphy *pdphy)
+{
+	struct dentry *ent;
+
+	pdphy->debug_root = debugfs_create_dir("usb-pdphy", NULL);
+	if (!pdphy->debug_root) {
+		dev_warn(pdphy->dev, "Couldn't create debug dir\n");
+		return;
+	}
+
+	ent = debugfs_create_file("status", 0400, pdphy->debug_root, pdphy,
+				  &status_ops);
+	if (!ent) {
+		dev_warn(pdphy->dev, "Couldn't create status file\n");
+		debugfs_remove(pdphy->debug_root);
+	}
+}
+
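+/*
+ * Powers the PD PHY supply rail on (HPM load, 2.8-3.3 V, enable) or runs the
+ * same sequence in reverse to turn it off.
+ */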
+static int pdphy_enable_power(struct usb_pdphy *pdphy, bool on)
+{
+	int ret = 0;
+
+	dev_dbg(pdphy->dev, "%s turn %s regulator.\n", __func__,
+		on ? "on" : "off");
+
+	if (!on)
+		goto disable_pdphy_vdd;
+
+	ret = regulator_set_load(pdphy->vdd_pdphy, VDD_PDPHY_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(pdphy->dev, "Unable to set HPM of vdd_pdphy:%d\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(pdphy->vdd_pdphy, VDD_PDPHY_VOL_MIN,
+						VDD_PDPHY_VOL_MAX);
+	if (ret) {
+		dev_err(pdphy->dev,
+				"set voltage failed for vdd_pdphy:%d\n", ret);
+		goto put_pdphy_vdd_lpm;
+	}
+
+	ret = regulator_enable(pdphy->vdd_pdphy);
+	if (ret) {
+		dev_err(pdphy->dev, "Unable to enable vdd_pdphy:%d\n", ret);
+		goto unset_pdphy_vdd;
+	}
+
+	dev_dbg(pdphy->dev, "%s: PD PHY regulator turned ON.\n", __func__);
+	return ret;
+
+disable_pdphy_vdd:
+	ret = regulator_disable(pdphy->vdd_pdphy);
+	if (ret)
+		dev_err(pdphy->dev, "Unable to disable vdd_pdphy:%d\n", ret);
+
+unset_pdphy_vdd:
+	ret = regulator_set_voltage(pdphy->vdd_pdphy, 0, VDD_PDPHY_VOL_MAX);
+	if (ret)
+		dev_err(pdphy->dev,
+			"Unable to set (0) voltage for vdd_pdphy:%d\n", ret);
+
+put_pdphy_vdd_lpm:
+	ret = regulator_set_load(pdphy->vdd_pdphy, 0);
+	if (ret < 0)
+		dev_err(pdphy->dev, "Unable to set (0) HPM of vdd_pdphy\n");
+
+	return ret;
+}
+
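+/*
+ * Enables or disables all PD PHY interrupts; the sig/msg RX interrupts are
+ * additionally marked wake-capable so incoming PD traffic can wake the
+ * system from suspend.
+ */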
+void pdphy_enable_irq(struct usb_pdphy *pdphy, bool enable)
+{
+	if (enable) {
+		enable_irq(pdphy->sig_tx_irq);
+		enable_irq(pdphy->sig_rx_irq);
+		pdphy->sig_rx_wake_enabled =
+			!enable_irq_wake(pdphy->sig_rx_irq);
+		enable_irq(pdphy->msg_tx_irq);
+		if (!pdphy->in_test_data_mode) {
+			enable_irq(pdphy->msg_rx_irq);
+			pdphy->msg_rx_wake_enabled =
+				!enable_irq_wake(pdphy->msg_rx_irq);
+		}
+		enable_irq(pdphy->msg_tx_failed_irq);
+		enable_irq(pdphy->msg_tx_discarded_irq);
+		enable_irq(pdphy->msg_rx_discarded_irq);
+		return;
+	}
+
+	disable_irq(pdphy->sig_tx_irq);
+	disable_irq(pdphy->sig_rx_irq);
+	if (pdphy->sig_rx_wake_enabled) {
+		disable_irq_wake(pdphy->sig_rx_irq);
+		pdphy->sig_rx_wake_enabled = false;
+	}
+	disable_irq(pdphy->msg_tx_irq);
+	if (!pdphy->in_test_data_mode)
+		disable_irq(pdphy->msg_rx_irq);
+	if (pdphy->msg_rx_wake_enabled) {
+		disable_irq_wake(pdphy->msg_rx_irq);
+		pdphy->msg_rx_wake_enabled = false;
+	}
+	disable_irq(pdphy->msg_tx_failed_irq);
+	disable_irq(pdphy->msg_tx_discarded_irq);
+	disable_irq(pdphy->msg_rx_discarded_irq);
+}
+
+static int pdphy_reg_read(struct usb_pdphy *pdphy, u8 *val, u16 addr, int count)
+{
+	int ret;
+
+	ret = regmap_bulk_read(pdphy->regmap, pdphy->base + addr, val, count);
+	if (ret) {
+		dev_err(pdphy->dev, "read failed: addr=0x%04x, ret=%d\n",
+			pdphy->base + addr, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Write multiple registers to device with block of data */
+static int pdphy_bulk_reg_write(struct usb_pdphy *pdphy, u16 addr,
+	const void *val, u8 val_cnt)
+{
+	int ret;
+
+	ret = regmap_bulk_write(pdphy->regmap, pdphy->base + addr,
+			val, val_cnt);
+	if (ret) {
+		dev_err(pdphy->dev, "bulk write failed: addr=0x%04x, ret=%d\n",
+				pdphy->base + addr, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/* Writes a single byte to the specified register */
+static inline int pdphy_reg_write(struct usb_pdphy *pdphy, u16 addr, u8 val)
+{
+	return pdphy_bulk_reg_write(pdphy, addr, &val, 1);
+}
+
+/* Writes to the specified register limited by the bit mask */
+static int pdphy_masked_write(struct usb_pdphy *pdphy, u16 addr,
+	u8 mask, u8 val)
+{
+	int ret;
+
+	ret = regmap_update_bits(pdphy->regmap, pdphy->base + addr, mask, val);
+	if (ret) {
+		dev_err(pdphy->dev, "write failed: addr=0x%04x, ret=%d\n",
+				pdphy->base + addr, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+int pd_phy_update_roles(enum data_role dr, enum power_role pr)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	return pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+		(MSG_CONFIG_PORT_DATA_ROLE | MSG_CONFIG_PORT_POWER_ROLE),
+		((dr == DR_DFP ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
+		 (pr == PR_SRC ? MSG_CONFIG_PORT_POWER_ROLE : 0)));
+}
+EXPORT_SYMBOL(pd_phy_update_roles);
+
+int pd_phy_update_frame_filter(u8 frame_filter_val)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	return pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, frame_filter_val);
+}
+EXPORT_SYMBOL(pd_phy_update_frame_filter);
+
+int pd_phy_open(struct pd_phy_params *params)
+{
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (pdphy->is_opened) {
+		dev_err(pdphy->dev, "%s: already opened\n", __func__);
+		return -EBUSY;
+	}
+
+	pdphy->signal_cb = params->signal_cb;
+	pdphy->msg_rx_cb = params->msg_rx_cb;
+	pdphy->shutdown_cb = params->shutdown_cb;
+	pdphy->data_role = params->data_role;
+	pdphy->power_role = params->power_role;
+	pdphy->frame_filter_val = params->frame_filter_val;
+
+	dev_dbg(pdphy->dev, "%s: DR %x PR %x frame filter val %x\n", __func__,
+		pdphy->data_role, pdphy->power_role, pdphy->frame_filter_val);
+
+	ret = pdphy_enable_power(pdphy, true);
+	if (ret)
+		return ret;
+
+	/* update data and power role to be used in GoodCRC generation */
+	ret = pd_phy_update_roles(pdphy->data_role, pdphy->power_role);
+	if (ret)
+		return ret;
+
+	/* PD 2.0  phy */
+	ret = pdphy_masked_write(pdphy, USB_PDPHY_MSG_CONFIG,
+			MSG_CONFIG_SPEC_REV_MASK, USBPD_REV_20);
+	if (ret)
+		return ret;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, 0);
+	if (ret)
+		return ret;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, CONTROL_ENABLE);
+	if (ret)
+		return ret;
+
+	/* update frame filter */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER,
+			pdphy->frame_filter_val);
+	if (ret)
+		return ret;
+
+	/* initialize Rx buffer ownership to PDPHY HW */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+	if (ret)
+		return ret;
+
+	pdphy->is_opened = true;
+	pdphy_enable_irq(pdphy, true);
+
+	return ret;
+}
+EXPORT_SYMBOL(pd_phy_open);
+
+int pd_phy_signal(enum pd_sig_type sig)
+{
+	u8 val;
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	dev_dbg(pdphy->dev, "%s: type %d\n", __func__, sig);
+
+	if (!pdphy->is_opened) {
+		dev_dbg(pdphy->dev, "%s: pdphy disabled\n", __func__);
+		return -ENODEV;
+	}
+
+	pdphy->tx_status = -EINPROGRESS;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return ret;
+
+	usleep_range(2, 3);
+
+	val = (sig == CABLE_RESET_SIG ? TX_CONTROL_FRAME_TYPE_CABLE_RESET : 0)
+		| TX_CONTROL_SEND_SIGNAL;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
+	if (ret)
+		return ret;
+
+	ret = wait_event_interruptible_hrtimeout(pdphy->tx_waitq,
+		pdphy->tx_status != -EINPROGRESS,
+		ms_to_ktime(HARD_RESET_COMPLETE_TIME));
+	if (ret) {
+		dev_err(pdphy->dev, "%s: failed ret %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+
+	if (pdphy->tx_status)
+		return pdphy->tx_status;
+
+	if (sig == HARD_RESET_SIG)
+		/* Frame filter is reconfigured in pd_phy_open() */
+		return pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, 0);
+
+	return 0;
+}
+EXPORT_SYMBOL(pd_phy_signal);
+
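+/*
+ * Transmits one PD message: verifies no RX is pending, writes the 2-byte
+ * header and data objects into the TX buffer, programs the size and retry
+ * count, triggers the send, and sleeps until the TX result IRQ fires or
+ * tReceiverResponse expires.
+ */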
+int pd_phy_write(u16 hdr, const u8 *data, size_t data_len, enum pd_sop_type sop)
+{
+	u8 val;
+	int ret;
+	size_t total_len = data_len + USB_PDPHY_MSG_HDR_LEN;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (!pdphy->is_opened) {
+		dev_dbg(pdphy->dev, "%s: pdphy disabled\n", __func__);
+		return -ENODEV;
+	}
+
+	dev_dbg(pdphy->dev, "%s: hdr %x frame sop_type %d\n",
+			__func__, hdr, sop);
+
+	if (data_len > USB_PDPHY_MAX_DATA_OBJ_LEN) {
+		dev_err(pdphy->dev, "%s: invalid data object len %zu\n",
+			__func__, data_len);
+		return -EINVAL;
+	}
+
+	ret = pdphy_reg_read(pdphy, &val, USB_PDPHY_RX_ACKNOWLEDGE, 1);
+	if (ret || val) {
+		dev_err(pdphy->dev, "%s: RX message pending\n", __func__);
+		return -EBUSY;
+	}
+
+	pdphy->tx_status = -EINPROGRESS;
+
+	/* write 2 byte SOP message header */
+	ret = pdphy_bulk_reg_write(pdphy, USB_PDPHY_TX_BUFFER_HDR, (u8 *)&hdr,
+			USB_PDPHY_MSG_HDR_LEN);
+	if (ret)
+		return ret;
+
+	if (data && data_len) {
+		print_hex_dump_debug("tx data obj:", DUMP_PREFIX_NONE, 32, 4,
+				data, data_len, false);
+
+		/* write data objects of SOP message */
+		ret = pdphy_bulk_reg_write(pdphy, USB_PDPHY_TX_BUFFER_DATA,
+				data, data_len);
+		if (ret)
+			return ret;
+	}
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_SIZE, total_len - 1);
+	if (ret)
+		return ret;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return ret;
+
+	usleep_range(2, 3);
+
+	val = (sop << 2) | TX_CONTROL_SEND_MSG;
+
+	/* nRetryCount == 2 for PD 3.0, 3 for PD 2.0 */
+	if (PD_MSG_HDR_REV(hdr) == USBPD_REV_30)
+		val |= TX_CONTROL_RETRY_COUNT(2);
+	else
+		val |= TX_CONTROL_RETRY_COUNT(3);
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, val);
+	if (ret)
+		return ret;
+
+	ret = wait_event_interruptible_hrtimeout(pdphy->tx_waitq,
+		pdphy->tx_status != -EINPROGRESS,
+		ms_to_ktime(RECEIVER_RESPONSE_TIME));
+	if (ret) {
+		dev_err(pdphy->dev, "%s: failed ret %d\n", __func__, ret);
+		return ret;
+	}
+
+	if (!pdphy->tx_status)
+		pdphy->tx_bytes += total_len;
+
+	return pdphy->tx_status ? pdphy->tx_status : 0;
+}
+EXPORT_SYMBOL(pd_phy_write);
+
+void pd_phy_close(void)
+{
+	int ret;
+	struct usb_pdphy *pdphy = __pdphy;
+
+	if (!pdphy) {
+		pr_err("%s: pdphy not found\n", __func__);
+		return;
+	}
+
+	if (!pdphy->is_opened) {
+		dev_err(pdphy->dev, "%s: not opened\n", __func__);
+		return;
+	}
+
+	pdphy->is_opened = false;
+	pdphy_enable_irq(pdphy, false);
+
+	pdphy->tx_status = -ESHUTDOWN;
+
+	wake_up_all(&pdphy->tx_waitq);
+
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+	pdphy->in_test_data_mode = false;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_TX_CONTROL, 0);
+	if (ret)
+		return;
+
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_EN_CONTROL, 0);
+	if (ret)
+		return;
+
+	pdphy_enable_power(pdphy, false);
+}
+EXPORT_SYMBOL(pd_phy_close);
+
+static irqreturn_t pdphy_msg_tx_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	/* TX already aborted by received signal */
+	if (pdphy->tx_status != -EINPROGRESS)
+		return IRQ_HANDLED;
+
+	if (irq == pdphy->msg_tx_irq) {
+		pdphy->msg_tx_cnt++;
+		pdphy->tx_status = 0;
+	} else if (irq == pdphy->msg_tx_discarded_irq) {
+		pdphy->msg_tx_discarded_cnt++;
+		pdphy->tx_status = -EBUSY;
+	} else if (irq == pdphy->msg_tx_failed_irq) {
+		pdphy->msg_tx_failed_cnt++;
+		pdphy->tx_status = -EFAULT;
+	} else {
+		dev_err(pdphy->dev, "spurious irq #%d received\n", irq);
+		return IRQ_NONE;
+	}
+
+	wake_up(&pdphy->tx_waitq);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pdphy_msg_rx_discarded_irq(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->msg_rx_discarded_cnt++;
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pdphy_sig_rx_irq_thread(int irq, void *data)
+{
+	u8 rx_status, frame_type;
+	int ret;
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->sig_rx_cnt++;
+
+	ret = pdphy_reg_read(pdphy, &rx_status, USB_PDPHY_RX_STATUS, 1);
+	if (ret)
+		goto done;
+
+	frame_type = rx_status & RX_FRAME_TYPE;
+	if (frame_type != HARD_RESET_SIG) {
+		dev_err(pdphy->dev, "%s:unsupported frame type %d\n",
+			__func__, frame_type);
+		goto done;
+	}
+
+	/* Frame filter is reconfigured in pd_phy_open() */
+	ret = pdphy_reg_write(pdphy, USB_PDPHY_FRAME_FILTER, 0);
+
+	if (pdphy->signal_cb)
+		pdphy->signal_cb(pdphy->usbpd, frame_type);
+
+	if (pdphy->tx_status == -EINPROGRESS) {
+		pdphy->tx_status = -EBUSY;
+		wake_up(&pdphy->tx_waitq);
+	}
+done:
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pdphy_sig_tx_irq_thread(int irq, void *data)
+{
+	struct usb_pdphy *pdphy = data;
+
+	/* in case of exit from BIST Carrier Mode 2, clear BIST_MODE */
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+
+	pdphy->sig_tx_cnt++;
+	pdphy->tx_status = 0;
+	wake_up(&pdphy->tx_waitq);
+
+	return IRQ_HANDLED;
+}
+
+static int pd_phy_bist_mode(u8 bist_mode)
+{
+	struct usb_pdphy *pdphy = __pdphy;
+
+	dev_dbg(pdphy->dev, "%s: enter BIST mode %d\n", __func__, bist_mode);
+
+	pdphy_reg_write(pdphy, USB_PDPHY_BIST_MODE, 0);
+
+	udelay(5);
+
+	return pdphy_masked_write(pdphy, USB_PDPHY_BIST_MODE,
+			BIST_MODE_MASK | BIST_ENABLE, bist_mode | BIST_ENABLE);
+}
+
+static irqreturn_t pdphy_msg_rx_irq(int irq, void *data)
+{
+	u8 size, rx_status, frame_type;
+	u8 buf[32];
+	int ret;
+	struct usb_pdphy *pdphy = data;
+
+	pdphy->msg_rx_cnt++;
+
+	ret = pdphy_reg_read(pdphy, &size, USB_PDPHY_RX_SIZE, 1);
+	if (ret)
+		goto done;
+
+	if (!size || size > 31) {
+		dev_err(pdphy->dev, "%s: invalid size %d\n", __func__, size);
+		goto done;
+	}
+
+	ret = pdphy_reg_read(pdphy, &rx_status, USB_PDPHY_RX_STATUS, 1);
+	if (ret)
+		goto done;
+
+	frame_type = rx_status & RX_FRAME_TYPE;
+	if (frame_type == SOPII_MSG) {
+		dev_err(pdphy->dev, "%s:unsupported frame type %d\n",
+			__func__, frame_type);
+		goto done;
+	}
+
+	ret = pdphy_reg_read(pdphy, buf, USB_PDPHY_RX_BUFFER, size + 1);
+	if (ret)
+		goto done;
+
+	/* ack to change ownership of rx buffer back to PDPHY RX HW */
+	pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+
+	if (((buf[0] & 0xf) == PD_MSG_BIST) && size >= 5) { /* BIST */
+		u8 mode = buf[5] >> 4; /* [31:28] of 1st data object */
+
+		pd_phy_bist_mode(mode);
+		pdphy_reg_write(pdphy, USB_PDPHY_RX_ACKNOWLEDGE, 0);
+
+		if (mode == PD_BIST_TEST_DATA_MODE) {
+			pdphy->in_test_data_mode = true;
+			disable_irq_nosync(irq);
+		}
+		goto done;
+	}
+
+	if (pdphy->msg_rx_cb)
+		pdphy->msg_rx_cb(pdphy->usbpd, frame_type, buf, size + 1);
+
+	print_hex_dump_debug("rx msg:", DUMP_PREFIX_NONE, 32, 4, buf, size + 1,
+		false);
+	pdphy->rx_bytes += size + 1;
+done:
+	return IRQ_HANDLED;
+}
+
+static int pdphy_request_irq(struct usb_pdphy *pdphy,
+				struct device_node *node,
+				int *irq_num, const char *irq_name,
+				irqreturn_t (irq_handler)(int irq, void *data),
+				irqreturn_t (thread_fn)(int irq, void *data),
+				int flags)
+{
+	int ret;
+
+	*irq_num = of_irq_get_byname(node, irq_name);
+	if (*irq_num < 0) {
+		dev_err(pdphy->dev, "Unable to get %s irq\n", irq_name);
+		return -ENXIO;
+	}
+
+	irq_set_status_flags(*irq_num, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(pdphy->dev, *irq_num, irq_handler,
+			thread_fn, flags, irq_name, pdphy);
+	if (ret < 0) {
+		dev_err(pdphy->dev, "Unable to request %s irq: %d\n",
+				irq_name, ret);
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
+static int pdphy_probe(struct platform_device *pdev)
+{
+	int ret;
+	unsigned int base;
+	struct usb_pdphy *pdphy;
+
+	pdphy = devm_kzalloc(&pdev->dev, sizeof(*pdphy), GFP_KERNEL);
+	if (!pdphy)
+		return -ENOMEM;
+
+	pdphy->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!pdphy->regmap) {
+		dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+		return -EINVAL;
+	}
+
+	dev_set_drvdata(&pdev->dev, pdphy);
+
+	ret = of_property_read_u32(pdev->dev.of_node, "reg", &base);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to get reg base address ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	pdphy->base = base;
+	pdphy->dev = &pdev->dev;
+
+	init_waitqueue_head(&pdphy->tx_waitq);
+
+	pdphy->vdd_pdphy = devm_regulator_get(&pdev->dev, "vdd-pdphy");
+	if (IS_ERR(pdphy->vdd_pdphy)) {
+		dev_err(&pdev->dev, "unable to get vdd-pdphy\n");
+		return PTR_ERR(pdphy->vdd_pdphy);
+	}
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->sig_tx_irq, "sig-tx", NULL,
+		pdphy_sig_tx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->sig_rx_irq, "sig-rx", NULL,
+		pdphy_sig_rx_irq_thread, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_irq, "msg-tx", pdphy_msg_tx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_rx_irq, "msg-rx", pdphy_msg_rx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_failed_irq, "msg-tx-failed", pdphy_msg_tx_irq,
+		NULL, (IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_tx_discarded_irq, "msg-tx-discarded",
+		pdphy_msg_tx_irq, NULL,
+		(IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	ret = pdphy_request_irq(pdphy, pdev->dev.of_node,
+		&pdphy->msg_rx_discarded_irq, "msg-rx-discarded",
+		pdphy_msg_rx_discarded_irq, NULL,
+		(IRQF_TRIGGER_RISING | IRQF_ONESHOT));
+	if (ret < 0)
+		return ret;
+
+	/* usbpd_create() could call back to us, so have __pdphy ready */
+	__pdphy = pdphy;
+
+	pdphy->usbpd = usbpd_create(&pdev->dev);
+	if (IS_ERR(pdphy->usbpd)) {
+		dev_err(&pdev->dev, "usbpd_create failed: %ld\n",
+				PTR_ERR(pdphy->usbpd));
+		__pdphy = NULL;
+		return PTR_ERR(pdphy->usbpd);
+	}
+
+	pdphy_create_debugfs_entries(pdphy);
+
+	return 0;
+}
+
+static int pdphy_remove(struct platform_device *pdev)
+{
+	struct usb_pdphy *pdphy = platform_get_drvdata(pdev);
+
+	debugfs_remove_recursive(pdphy->debug_root);
+	usbpd_destroy(pdphy->usbpd);
+
+	if (pdphy->is_opened)
+		pd_phy_close();
+
+	__pdphy = NULL;
+
+	return 0;
+}
+
+static void pdphy_shutdown(struct platform_device *pdev)
+{
+	struct usb_pdphy *pdphy = platform_get_drvdata(pdev);
+
+	/* let protocol engine shutdown the pdphy synchronously */
+	if (pdphy->shutdown_cb)
+		pdphy->shutdown_cb(pdphy->usbpd);
+}
+
+static const struct of_device_id pdphy_match_table[] = {
+	{
+		.compatible	 = "qcom,qpnp-pdphy",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, pdphy_match_table);
+
+static struct platform_driver pdphy_driver = {
+	 .driver	 = {
+		 .name			= "qpnp-pdphy",
+		 .of_match_table	= pdphy_match_table,
+	 },
+	 .probe		= pdphy_probe,
+	 .remove	= pdphy_remove,
+	 .shutdown	= pdphy_shutdown,
+};
+
+module_platform_driver(pdphy_driver);
+
+MODULE_DESCRIPTION("QPNP PD PHY Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:qpnp-pdphy");
diff --git a/drivers/usb/pd/usbpd.h b/drivers/usb/pd/usbpd.h
new file mode 100644
index 0000000..a630820
--- /dev/null
+++ b/drivers/usb/pd/usbpd.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _USBPD_H
+#define _USBPD_H
+
+#include <linux/device.h>
+
+struct usbpd;
+
+#if IS_ENABLED(CONFIG_USB_PD_POLICY)
+struct usbpd *usbpd_create(struct device *parent);
+void usbpd_destroy(struct usbpd *pd);
+#else
+static inline struct usbpd *usbpd_create(struct device *parent)
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline void usbpd_destroy(struct usbpd *pd) { }
+#endif
+
+enum data_role {
+	DR_NONE = -1,
+	DR_UFP = 0,
+	DR_DFP = 1,
+};
+
+enum power_role {
+	PR_NONE = -1,
+	PR_SINK = 0,
+	PR_SRC = 1,
+};
+
+enum pd_sig_type {
+	HARD_RESET_SIG = 0,
+	CABLE_RESET_SIG,
+};
+
+enum pd_sop_type {
+	SOP_MSG = 0,
+	SOPI_MSG,
+	SOPII_MSG,
+};
+
+enum pd_spec_rev {
+	USBPD_REV_20 = 1,
+	USBPD_REV_30 = 2,
+};
+
+/* enable msg and signal to be received by phy */
+#define FRAME_FILTER_EN_SOP		BIT(0)
+#define FRAME_FILTER_EN_SOPI		BIT(1)
+#define FRAME_FILTER_EN_HARD_RESET	BIT(5)
+
+struct pd_phy_params {
+	void		(*signal_cb)(struct usbpd *pd, enum pd_sig_type sig);
+	void		(*msg_rx_cb)(struct usbpd *pd, enum pd_sop_type sop,
+					u8 *buf, size_t len);
+	void		(*shutdown_cb)(struct usbpd *pd);
+	enum data_role	data_role;
+	enum power_role power_role;
+	u8		frame_filter_val;
+};
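+
+/*
+ * A minimal usage sketch; my_sig_cb and my_msg_cb stand in for the
+ * protocol engine's real callbacks:
+ *
+ *	struct pd_phy_params params = {
+ *		.signal_cb	= my_sig_cb,
+ *		.msg_rx_cb	= my_msg_cb,
+ *		.data_role	= DR_UFP,
+ *		.power_role	= PR_SINK,
+ *		.frame_filter_val = FRAME_FILTER_EN_SOP |
+ *				    FRAME_FILTER_EN_HARD_RESET,
+ *	};
+ *
+ *	if (!pd_phy_open(&params))
+ *		pd_phy_write(hdr, data, len, SOP_MSG);
+ */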
+
+#if IS_ENABLED(CONFIG_QPNP_USB_PDPHY)
+int pd_phy_open(struct pd_phy_params *params);
+int pd_phy_signal(enum pd_sig_type sig);
+int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
+		enum pd_sop_type sop);
+int pd_phy_update_roles(enum data_role dr, enum power_role pr);
+int pd_phy_update_frame_filter(u8 frame_filter_val);
+void pd_phy_close(void);
+#else
+static inline int pd_phy_open(struct pd_phy_params *params)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_signal(enum pd_sig_type type)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_write(u16 hdr, const u8 *data, size_t data_len,
+		enum pd_sop_type sop)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_update_roles(enum data_role dr, enum power_role pr)
+{
+	return -ENODEV;
+}
+
+static inline int pd_phy_update_frame_filter(u8 frame_filter_val)
+{
+	return -ENODEV;
+}
+
+static inline void pd_phy_close(void)
+{
+}
+#endif
+#endif /* _USBPD_H */
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 301aebd..230f58f 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/mutex.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
@@ -20,6 +21,8 @@
 #include <linux/reset.h>
 
 #define USB2_PHY_USB_PHY_UTMI_CTRL0		(0x3c)
+#define OPMODE_MASK				(0x3 << 3)
+#define OPMODE_NONDRIVING			(0x1 << 3)
 #define SLEEPM					BIT(0)
 
 #define USB2_PHY_USB_PHY_UTMI_CTRL5		(0x50)
@@ -54,6 +57,7 @@
 #define TOGGLE_2WR				BIT(6)
 
 #define USB2_PHY_USB_PHY_CFG0			(0x94)
+#define UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN	BIT(0)
 #define UTMI_PHY_CMN_CTRL_OVERRIDE_EN		BIT(1)
 
 #define USB2_PHY_USB_PHY_REFCLK_CTRL		(0xa0)
@@ -89,6 +93,7 @@
 	bool			power_enabled;
 	bool			suspended;
 	bool			cable_connected;
+	bool			dpdm_enable;
 
 	int			*param_override_seq;
 	int			param_override_seq_cnt;
@@ -96,4 +101,7 @@
 	void __iomem		*phy_rcal_reg;
 	u32			rcal_mask;
 
+	struct mutex		phy_lock;
+	struct regulator_desc	dpdm_rdesc;
+	struct regulator_dev	*dpdm_rdev;
 
@@ -457,8 +466,14 @@
 			(phy->phy.flags & PHY_HOST_MODE)) {
 			msm_hsphy_enable_clocks(phy, false);
 		} else {/* Cable disconnect */
-			msm_hsphy_enable_clocks(phy, false);
-			msm_hsphy_enable_power(phy, false);
+			mutex_lock(&phy->phy_lock);
+			if (!phy->dpdm_enable) {
+				msm_hsphy_enable_clocks(phy, false);
+				msm_hsphy_enable_power(phy, false);
+			} else {
+				dev_dbg(uphy->dev, "dpdm reg still active. Keep clocks/ldo ON\n");
+			}
+			mutex_unlock(&phy->phy_lock);
 		}
 		phy->suspended = true;
 	} else { /* Bus resume and cable connect */
@@ -489,6 +504,114 @@
 	return 0;
 }
 
+static int msm_hsphy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct msm_hsphy *phy = rdev_get_drvdata(rdev);
+
+	dev_dbg(phy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, phy->dpdm_enable);
+
+	mutex_lock(&phy->phy_lock);
+	if (!phy->dpdm_enable) {
+		ret = msm_hsphy_enable_power(phy, true);
+		if (ret) {
+			mutex_unlock(&phy->phy_lock);
+			return ret;
+		}
+
+		msm_hsphy_enable_clocks(phy, true);
+		msm_hsphy_reset(phy);
+
+		/*
+		 * For PMIC charger detection, place PHY in UTMI non-driving
+		 * mode which leaves Dp and Dm lines in high-Z state.
+		 */
+		msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL2,
+					USB2_SUSPEND_N_SEL | USB2_SUSPEND_N,
+					USB2_SUSPEND_N_SEL | USB2_SUSPEND_N);
+		msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_UTMI_CTRL0,
+					OPMODE_MASK, OPMODE_NONDRIVING);
+		msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_CFG0,
+					UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN,
+					UTMI_PHY_DATAPATH_CTRL_OVERRIDE_EN);
+
+		phy->dpdm_enable = true;
+	}
+	mutex_unlock(&phy->phy_lock);
+
+	return ret;
+}
+
+static int msm_hsphy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct msm_hsphy *phy = rdev_get_drvdata(rdev);
+
+	dev_dbg(phy->phy.dev, "%s dpdm_enable:%d\n",
+				__func__, phy->dpdm_enable);
+
+	mutex_lock(&phy->phy_lock);
+	if (phy->dpdm_enable) {
+		if (!phy->cable_connected) {
+			msm_hsphy_enable_clocks(phy, false);
+			ret = msm_hsphy_enable_power(phy, false);
+			if (ret < 0) {
+				mutex_unlock(&phy->phy_lock);
+				return ret;
+			}
+		}
+		phy->dpdm_enable = false;
+	}
+	mutex_unlock(&phy->phy_lock);
+
+	return ret;
+}
+
+static int msm_hsphy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct msm_hsphy *phy = rdev_get_drvdata(rdev);
+
+	dev_dbg(phy->phy.dev, "%s dpdm_enable:%d\n",
+			__func__, phy->dpdm_enable);
+
+	return phy->dpdm_enable;
+}
+
+static struct regulator_ops msm_hsphy_dpdm_regulator_ops = {
+	.enable		= msm_hsphy_dpdm_regulator_enable,
+	.disable	= msm_hsphy_dpdm_regulator_disable,
+	.is_enabled	= msm_hsphy_dpdm_regulator_is_enabled,
+};
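+
+/*
+ * Consumer sketch (names illustrative): a charger driver takes a
+ * reference on this regulator so that D+/D- stay in the non-driving,
+ * high-Z state while PMIC charger detection runs:
+ *
+ *	struct regulator *dpdm = devm_regulator_get(dev, "dpdm");
+ *
+ *	if (!IS_ERR(dpdm))
+ *		ret = regulator_enable(dpdm);
+ */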
+
+static int msm_hsphy_regulator_init(struct msm_hsphy *phy)
+{
+	struct device *dev = phy->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	phy->dpdm_rdesc.owner = THIS_MODULE;
+	phy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	phy->dpdm_rdesc.ops = &msm_hsphy_dpdm_regulator_ops;
+	phy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = phy;
+	cfg.of_node = dev->of_node;
+
+	phy->dpdm_rdev = devm_regulator_register(dev, &phy->dpdm_rdesc, &cfg);
+	if (IS_ERR(phy->dpdm_rdev))
+		return PTR_ERR(phy->dpdm_rdev);
+
+	return 0;
+}
+
 static int msm_hsphy_probe(struct platform_device *pdev)
 {
 	struct msm_hsphy *phy;
@@ -674,6 +797,7 @@
 		goto err_ret;
 	}
 
+	mutex_init(&phy->phy_lock);
 	platform_set_drvdata(pdev, phy);
 
 	if (phy->emu_init_seq)
@@ -689,6 +813,12 @@
 	if (ret)
 		return ret;
 
+	ret = msm_hsphy_regulator_init(phy);
+	if (ret) {
+		usb_remove_phy(&phy->phy);
+		return ret;
+	}
+
 	return 0;
 
 err_ret:
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index aaf56a8..591d878 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4295,6 +4295,7 @@
 
 int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 {
+#if 0
 	struct super_block *sb = inode->i_sb;
 	ext4_lblk_t first_block, stop_block;
 	struct address_space *mapping = inode->i_mapping;
@@ -4428,6 +4429,12 @@
 out_mutex:
 	inode_unlock(inode);
 	return ret;
+#else
+	/*
+	 * Disabled as per b/28760453
+	 */
+	return -EOPNOTSUPP;
+#endif
 }
 
 int ext4_inode_attach_jinode(struct inode *inode)
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 97ea41d..cdc6799 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -1007,6 +1007,51 @@
 	/** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */
 	unsigned bad_edid_counter;
 
+	/*
+	 * @pt_scan_info: PT scan info obtained from the VCDB of EDID
+	 * @it_scan_info: IT scan info obtained from the VCDB of EDID
+	 * @ce_scan_info: CE scan info obtained from the VCDB of EDID
+	 * @hdr_eotf: Electro optical transfer function obtained from HDR block
+	 * @hdr_metadata_type_one: Metadata type one obtained from HDR block
+	 * @hdr_max_luminance: desired max luminance obtained from HDR block
+	 * @hdr_avg_luminance: desired avg luminance obtained from HDR block
+	 * @hdr_min_luminance: desired min luminance obtained from HDR block
+	 * @hdr_supported: does the sink support HDR content
+	 */
+	u8 pt_scan_info;
+	u8 it_scan_info;
+	u8 ce_scan_info;
+	u32 hdr_eotf;
+	bool hdr_metadata_type_one;
+	u32 hdr_max_luminance;
+	u32 hdr_avg_luminance;
+	u32 hdr_min_luminance;
+	bool hdr_supported;
+
+	/*
+	 * EDID bits HDMI 2.0
+	 * @max_tmds_char: indicates the maximum TMDS Character Rate supported
+	 * @scdc_present: when set the sink supports SCDC functionality
+	 * @rr_capable: when set the sink is capable of initiating an
+	 *		SCDC read request
+	 * @supports_scramble: when set the sink supports less than
+	 *		340Mcsc scrambling
+	 * @flags_3d: 3D view(s) supported by the sink, see drm_edid.h
+	 *		(DRM_EDID_3D_*)
+	 */
+	int max_tmds_char;	/* in Mcsc */
+	bool scdc_present;
+	bool rr_capable;
+	bool supports_scramble;
+	int flags_3d;
+
 	/**
 	 * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used
 	 * in Displayport compliance testing - Displayport Link CTS Core 1.2
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 05cc31b..decd918 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -550,6 +550,8 @@
 # define DP_TEST_LINK_EDID_READ		    (1 << 2)
 # define DP_TEST_LINK_PHY_TEST_PATTERN	    (1 << 3) /* DPCD >= 1.1 */
 # define DP_TEST_LINK_FAUX_PATTERN	    (1 << 4) /* DPCD >= 1.2 */
+# define DP_TEST_LINK_AUDIO_PATTERN     (1 << 5)
+# define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6)
 
 #define DP_TEST_LINK_RATE		    0x219
 # define DP_LINK_RATE_162		    (0x6)
@@ -627,6 +629,14 @@
 # define DP_TEST_COUNT_MASK		    0xf
 
 #define DP_TEST_PHY_PATTERN                 0x248
+# define DP_TEST_PHY_PATTERN_NONE			0x0
+# define DP_TEST_PHY_PATTERN_D10_2_NO_SCRAMBLING	0x1
+# define DP_TEST_PHY_PATTERN_SYMBOL_ERR_MEASUREMENT_CNT 0x2
+# define DP_TEST_PHY_PATTERN_PRBS7			0x3
+# define DP_TEST_PHY_PATTERN_80_BIT_CUSTOM_PATTERN	0x4
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_1		0x5
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_2		0x6
+# define DP_TEST_PHY_PATTERN_CP2520_PATTERN_3		0x7
 #define DP_TEST_80BIT_CUSTOM_PATTERN_7_0    0x250
 #define	DP_TEST_80BIT_CUSTOM_PATTERN_15_8   0x251
 #define	DP_TEST_80BIT_CUSTOM_PATTERN_23_16  0x252
@@ -648,6 +658,19 @@
 #define DP_TEST_SINK			    0x270
 # define DP_TEST_SINK_START		    (1 << 0)
 
+#define DP_TEST_AUDIO_MODE		    0x271
+
+#define DP_TEST_AUDIO_PATTERN_TYPE	    0x272
+
+#define DP_TEST_AUDIO_PERIOD_CH1	    0x273
+#define DP_TEST_AUDIO_PERIOD_CH2	    0x274
+#define DP_TEST_AUDIO_PERIOD_CH3	    0x275
+#define DP_TEST_AUDIO_PERIOD_CH4	    0x276
+#define DP_TEST_AUDIO_PERIOD_CH5	    0x277
+#define DP_TEST_AUDIO_PERIOD_CH6	    0x278
+#define DP_TEST_AUDIO_PERIOD_CH7	    0x279
+#define DP_TEST_AUDIO_PERIOD_CH8	    0x27A
+
 #define DP_FEC_STATUS			    0x280    /* 1.4 */
 # define DP_FEC_DECODE_EN_DETECTED	    (1 << 0)
 # define DP_FEC_DECODE_DIS_DETECTED	    (1 << 1)
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index e3c4048..8c3005b 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -279,6 +279,11 @@
 
 #define DRM_ELD_CEA_SAD(mnl, sad)	(20 + (mnl) + 3 * (sad))
 
+/* HDMI 2.0 */
+#define DRM_EDID_3D_INDEPENDENT_VIEW	(1 << 2)
+#define DRM_EDID_3D_DUAL_VIEW		(1 << 1)
+#define DRM_EDID_3D_OSD_DISPARITY	(1 << 0)
+
 struct edid {
 	u8 header[8];
 	/* Vendor & product info */
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 4fef190..328f232 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -21,12 +21,18 @@
 #define MIPI_DSI_MSG_REQ_ACK	BIT(0)
 /* use Low Power Mode to transmit message */
 #define MIPI_DSI_MSG_USE_LPM	BIT(1)
+/* read mipi_dsi_msg.ctrl and unicast the message only to that ctrl */
+#define MIPI_DSI_MSG_UNICAST	BIT(2)
+/* queue commands until the last-command flag, then trigger all in one go */
+#define MIPI_DSI_MSG_LASTCOMMAND BIT(3)
 
 /**
  * struct mipi_dsi_msg - read/write DSI buffer
  * @channel: virtual channel id
  * @type: payload data type
  * @flags: flags controlling this message transmission
+ * @ctrl: ctrl index to transmit on
+ * @wait_ms: duration in ms to wait after message transmission
  * @tx_len: length of @tx_buf
  * @tx_buf: data to be written
  * @rx_len: length of @rx_buf
@@ -36,6 +42,8 @@
 	u8 channel;
 	u8 type;
 	u16 flags;
+	u32 ctrl;
+	u32 wait_ms;
 
 	size_t tx_len;
 	const void *tx_buf;
@@ -134,6 +142,10 @@
 #define MIPI_DSI_CLOCK_NON_CONTINUOUS	BIT(10)
 /* transmit data in low power */
 #define MIPI_DSI_MODE_LPM		BIT(11)
+/* disable BLLP area */
+#define MIPI_DSI_MODE_VIDEO_BLLP	BIT(12)
+/* disable EOF BLLP area */
+#define MIPI_DSI_MODE_VIDEO_EOF_BLLP	BIT(13)
 
 enum mipi_dsi_pixel_format {
 	MIPI_DSI_FMT_RGB888,
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c34a3e8..6292fa6 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -60,7 +60,7 @@
 	void (*free_cb)(struct kref *kref);
 };
 
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
 /**
  * struct drm_object_properties - property tracking for &drm_mode_object
  */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index caf40ad..84ad7b5 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -121,6 +121,7 @@
 	CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
 	CPUHP_AP_ARM_TWD_STARTING,
 	CPUHP_AP_QCOM_TIMER_STARTING,
+	CPUHP_AP_QCOM_SLEEP_STARTING,
 	CPUHP_AP_ARMADA_TIMER_STARTING,
 	CPUHP_AP_MARCO_TIMER_STARTING,
 	CPUHP_AP_MIPS_GIC_TIMER_STARTING,
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index e8ca5e6..64214f3 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -25,6 +25,8 @@
 #include <linux/iommu.h>
 #include <linux/msi.h>
 
+struct iova_domain;
+
 int iommu_dma_init(void);
 
 /* Domain management interface for IOMMU drivers */
@@ -56,6 +58,11 @@
 		unsigned long offset, size_t size, int prot);
 int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 		int nents, int prot);
+size_t iommu_dma_prepare_map_sg(struct device *dev, struct iova_domain *iovad,
+				struct scatterlist *sg, int nents);
+int iommu_dma_finalise_sg(struct device *dev, struct scatterlist *sg,
+		int nents, dma_addr_t dma_addr);
+void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents);
 
 /*
  * Arch code with no special attribute handling may use these
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
index 3beb30e..54be2ea 100644
--- a/include/linux/dma-mapping-fast.h
+++ b/include/linux/dma-mapping-fast.h
@@ -10,10 +10,14 @@
 #include <linux/io-pgtable-fast.h>
 
 struct dma_iommu_mapping;
+struct io_pgtable_ops;
+struct iova_domain;
 
 struct dma_fast_smmu_mapping {
 	struct device		*dev;
 	struct iommu_domain	*domain;
+	struct iova_domain	*iovad;
+
 	dma_addr_t	 base;
 	size_t		 size;
 	size_t		 num_4k_pages;
@@ -28,18 +32,16 @@
 	bool		have_stale_tlbs;
 
 	dma_addr_t	pgtbl_dma_handle;
-	av8l_fast_iopte	*pgtbl_pmds;
+	struct io_pgtable_ops *pgtbl_ops;
 
 	spinlock_t	lock;
 	struct notifier_block notifier;
-
-	int		is_smmu_pt_coherent;
 };
 
 #ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
 int fast_smmu_init_mapping(struct device *dev,
 			    struct dma_iommu_mapping *mapping);
-void fast_smmu_release_mapping(struct kref *kref);
+void fast_smmu_put_dma_cookie(struct iommu_domain *domain);
 #else
 static inline int fast_smmu_init_mapping(struct device *dev,
 					  struct dma_iommu_mapping *mapping)
@@ -47,9 +49,7 @@
 	return -ENODEV;
 }
 
-static inline void fast_smmu_release_mapping(struct kref *kref)
-{
-}
+static inline void fast_smmu_put_dma_cookie(struct iommu_domain *domain) {}
 #endif
 
 #endif /* __LINUX_DMA_MAPPING_FAST_H */
diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h
index 1660811..ce52873 100644
--- a/include/linux/hdcp_qseecom.h
+++ b/include/linux/hdcp_qseecom.h
@@ -53,14 +53,19 @@
 void *hdcp1_init(void);
 void hdcp1_deinit(void *data);
 bool hdcp1_feature_supported(void *data);
-int hdcp1_set_keys(void *data, uint32_t *aksv_msb, uint32_t *aksv_lsb);
+int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb);
 int hdcp1_set_enc(void *data, bool enable);
+void hdcp1_stop(void *data);
 
 void *hdcp2_init(u32 device_type);
 void hdcp2_deinit(void *ctx);
 bool hdcp2_feature_supported(void *ctx);
 int hdcp2_app_comm(void *ctx, enum hdcp2_app_cmd cmd,
 		struct hdcp2_app_data *app_data);
+int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id,
+		uint8_t stream_number, uint32_t *stream_id);
+int hdcp2_close_stream(void *ctx, uint32_t stream_id);
+int hdcp2_force_encryption(void *ctx, uint32_t enable);
 #else
 static inline void *hdcp1_init(void)
 {
@@ -76,8 +81,7 @@
 	return false;
 }
 
-static inline int hdcp1_set_keys(void *data, uint32_t *aksv_msb,
-		uint32_t *aksv_lsb)
+static inline int hdcp1_start(void *data, u32 *aksv_msb, u32 *aksv_lsb)
 {
 	return 0;
 }
@@ -87,6 +91,10 @@
 	return 0;
 }
 
+static inline void hdcp1_stop(void *data)
+{
+}
+
 static inline void *hdcp2_init(u32 device_type)
 {
 	return NULL;
@@ -106,6 +114,22 @@
 {
 	return 0;
 }
+
+static inline int hdcp2_open_stream(void *ctx, uint8_t vc_payload_id,
+		uint8_t stream_number, uint32_t *stream_id)
+{
+	return 0;
+}
+
+static inline int hdcp2_close_stream(void *ctx, uint32_t stream_id)
+{
+	return 0;
+}
+
+static inline int hdcp2_force_encryption(void *ctx, uint32_t enable)
+{
+	return 0;
+}
 #endif
 
 #endif /* __HDCP_QSEECOM_H */
diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h
index 37b3755..7eaf45f 100644
--- a/include/linux/io-pgtable-fast.h
+++ b/include/linux/io-pgtable-fast.h
@@ -8,13 +8,63 @@
 
 #include <linux/notifier.h>
 
+/*
+ * This ought to be private to io-pgtable-fast, but dma-mapping-fast
+ * currently requires it for a debug usecase.
+ */
 typedef u64 av8l_fast_iopte;
 
-#define iopte_pmd_offset(pmds, iova) (pmds + (iova >> 12))
+struct io_pgtable_ops;
 
-int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
-			 int prot);
-void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+
+int av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova,
+			 phys_addr_t paddr, size_t size, int prot);
+
+void av8l_fast_unmap_public(struct io_pgtable_ops *ops, unsigned long iova,
+				size_t size);
+
+int av8l_fast_map_sg_public(struct io_pgtable_ops *ops,
+			unsigned long iova, struct scatterlist *sgl,
+			unsigned int nents, int prot, size_t *size);
+
+bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops,
+					unsigned long iova);
+
+phys_addr_t av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops,
+					  unsigned long iova);
+#else
+static inline int
+av8l_fast_map_public(struct io_pgtable_ops *ops, unsigned long iova,
+		     phys_addr_t paddr, size_t size, int prot)
+{
+	return -EINVAL;
+}
+static inline void av8l_fast_unmap_public(struct io_pgtable_ops *ops,
+					  unsigned long iova, size_t size)
+{
+}
+
+static inline int av8l_fast_map_sg_public(struct io_pgtable_ops *ops,
+				unsigned long iova, struct scatterlist *sgl,
+				unsigned int nents, int prot, size_t *size)
+{
+	return 0;
+}
+
+static inline bool av8l_fast_iova_coherent_public(struct io_pgtable_ops *ops,
+						  unsigned long iova)
+{
+	return false;
+}
+static inline phys_addr_t
+av8l_fast_iova_to_phys_public(struct io_pgtable_ops *ops,
+				  unsigned long iova)
+{
+	return 0;
+}
+#endif /* CONFIG_IOMMU_IO_PGTABLE_FAST */
 
 /* events for notifiers passed to av8l_register_notify */
 #define MAPPED_OVER_STALE_TLB 1
@@ -29,14 +79,14 @@
  */
 #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa
 
-void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, bool skip_sync);
+void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops, bool skip_sync);
 void av8l_register_notify(struct notifier_block *nb);
 
 #else  /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
 
 #define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0
 
-static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
+static inline void av8l_fast_clear_stale_ptes(struct io_pgtable_ops *ops,
 					      bool skip_sync)
 {
 }
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 23cae96..4d9d70e 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -73,7 +73,7 @@
 };
 
 struct iommu_pgtbl_info {
-	void *pmds;
+	void *ops;
 };
 
 /* Domain feature flags */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 834683d..fc0ce6c 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -50,6 +50,7 @@
 #define LED_PANIC_INDICATOR	BIT(20)
 #define LED_BRIGHT_HW_CHANGED	BIT(21)
 #define LED_RETAIN_AT_SHUTDOWN	BIT(22)
+#define LED_KEEP_TRIGGER	BIT(23)
 
 	/* set_brightness_work / blink_timer flags, atomic, private. */
 	unsigned long		work_flags;
diff --git a/include/linux/msm_drm_notify.h b/include/linux/msm_drm_notify.h
new file mode 100644
index 0000000..3db08cf
--- /dev/null
+++ b/include/linux/msm_drm_notify.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ */
+#ifndef _MSM_DRM_NOTIFY_H_
+#define _MSM_DRM_NOTIFY_H_
+
+#include <linux/notifier.h>
+
+/* A hardware display blank change occurred */
+#define MSM_DRM_EVENT_BLANK			0x01
+/* A hardware display blank early change occurred */
+#define MSM_DRM_EARLY_EVENT_BLANK		0x02
+
+enum {
+	/* panel: power on */
+	MSM_DRM_BLANK_UNBLANK,
+	/* panel: power off */
+	MSM_DRM_BLANK_POWERDOWN,
+};
+
+enum msm_drm_display_id {
+	/* primary display */
+	MSM_DRM_PRIMARY_DISPLAY,
+	/* external display */
+	MSM_DRM_EXTERNAL_DISPLAY,
+	MSM_DRM_DISPLAY_MAX
+};
+
+struct msm_drm_notifier {
+	enum msm_drm_display_id id;
+	void *data;
+};
+
+int msm_drm_register_client(struct notifier_block *nb);
+int msm_drm_unregister_client(struct notifier_block *nb);
+#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d837dad..e86a358 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2267,13 +2267,10 @@
 	/* Used in GRE, set in fou/gue_gro_receive */
 	u8	is_fou:1;
 
-	/* Used to determine if flush_id can be ignored */
-	u8	is_atomic:1;
-
 	/* Number of gro_receive callbacks this packet already went through */
 	u8 recursion_counter:4;
 
-	/* 1 bit hole */
+	/* 2 bit hole */
 
 	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
 	__wsum	csum;
@@ -2934,12 +2931,15 @@
  */
 struct softnet_data {
 	struct list_head	poll_list;
+	struct napi_struct	*current_napi;
 	struct sk_buff_head	process_queue;
 
 	/* stats */
 	unsigned int		processed;
 	unsigned int		time_squeeze;
 	unsigned int		received_rps;
+	unsigned int		gro_coalesced;
+
 #ifdef CONFIG_RPS
 	struct softnet_data	*rps_ipi_list;
 #endif
@@ -3536,6 +3536,7 @@
 gro_result_t napi_gro_frags(struct napi_struct *napi);
 struct packet_offload *gro_find_receive_by_type(__be16 type);
 struct packet_offload *gro_find_complete_by_type(__be16 type);
+extern struct napi_struct *get_current_napi_context(void);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 15c236b..e70d443 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -584,6 +584,12 @@
 	int				group_caps;
 
 	struct perf_event		*group_leader;
+
+	/*
+	 * Protect the pmu, attributes and context of a group leader.
+	 * Note: does not protect the pointer to the group_leader.
+	 */
+	struct mutex			group_leader_mutex;
 	struct pmu			*pmu;
 	void				*pmu_private;
 
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 9788360..0ea3e1b 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -266,6 +266,9 @@
 #define plist_next(pos) \
 	list_next_entry(pos, node_list)
 
+#define plist_next_entry(pos, type, member)   \
+	container_of(plist_next(pos), type, member)
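+
+/*
+ * Example with a hypothetical container type:
+ *
+ *	struct foo { struct plist_node node; };
+ *	struct foo *next = plist_next_entry(&f->node, struct foo, node);
+ */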
+
 /**
  * plist_prev - get the prev entry in list
  * @pos:	the type * to cursor
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 56518ad..15195c0 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -39,7 +39,7 @@
  * current PWM hardware state.
  */
 struct pwm_args {
-	unsigned int period;
+	u64 period;
 	enum pwm_polarity polarity;
 };
 
@@ -48,6 +48,29 @@
 	PWMF_EXPORTED = 1 << 1,
 };
 
+/**
+ * enum pwm_output_type - output type of the PWM signal
+ * @PWM_OUTPUT_FIXED: PWM output is fixed until a change request
+ * @PWM_OUTPUT_MODULATED: PWM output is modulated in hardware
+ * autonomously with a predefined pattern
+ */
+enum pwm_output_type {
+	PWM_OUTPUT_FIXED = 1 << 0,
+	PWM_OUTPUT_MODULATED = 1 << 1,
+};
+
+/**
+ * struct pwm_output_pattern - PWM duty pattern for MODULATED duty type
+ * @duty_pattern: PWM duty cycles in the pattern for duty modulation
+ * @num_entries: number of entries in the pattern
+ * @cycles_per_duty: number of PWM period cycles an entry stays at
+ */
+struct pwm_output_pattern {
+	u64 *duty_pattern;
+	unsigned int num_entries;
+	u64 cycles_per_duty;
+};
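+
+/*
+ * Example: a pattern that steps an LED duty cycle up in four stages,
+ * holding each stage for 32 PWM periods (duty values in nanoseconds,
+ * purely illustrative):
+ *
+ *	static u64 ramp[] = { 0, 250000, 500000, 1000000 };
+ *	struct pwm_output_pattern pattern = {
+ *		.duty_pattern	 = ramp,
+ *		.num_entries	 = ARRAY_SIZE(ramp),
+ *		.cycles_per_duty = 32,
+ *	};
+ */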
+
 /*
  * struct pwm_state - state of a PWM channel
  * @period: PWM period (in nanoseconds)
@@ -56,9 +79,11 @@
  * @enabled: PWM enabled status
  */
 struct pwm_state {
-	unsigned int period;
-	unsigned int duty_cycle;
+	u64 period;
+	u64 duty_cycle;
 	enum pwm_polarity polarity;
+	enum pwm_output_type output_type;
+	struct pwm_output_pattern *output_pattern;
 	bool enabled;
 };
 
@@ -111,12 +136,30 @@
 		pwm->state.period = period;
 }
 
+static inline void pwm_set_period_extend(struct pwm_device *pwm, u64 period)
+{
+	if (pwm)
+		pwm->state.period = period;
+}
+
 static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
 {
 	struct pwm_state state;
 
 	pwm_get_state(pwm, &state);
 
+	if (state.period > UINT_MAX)
+		pr_warn("PWM period %llu is truncated\n", state.period);
+
+	return (unsigned int)state.period;
+}
+
+static inline u64 pwm_get_period_extend(const struct pwm_device *pwm)
+{
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
 	return state.period;
 }
 
@@ -126,12 +169,30 @@
 		pwm->state.duty_cycle = duty;
 }
 
+static inline void pwm_set_duty_cycle_extend(struct pwm_device *pwm, u64 duty)
+{
+	if (pwm)
+		pwm->state.duty_cycle = duty;
+}
+
 static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
 {
 	struct pwm_state state;
 
 	pwm_get_state(pwm, &state);
 
+	if (state.duty_cycle > UINT_MAX)
+		pr_warn("PWM duty cycle %llu is truncated\n", state.duty_cycle);
+
+	return (unsigned int)state.duty_cycle;
+}
+
+static inline u64 pwm_get_duty_cycle_extend(const struct pwm_device *pwm)
+{
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
 	return state.duty_cycle;
 }
 
@@ -144,6 +205,26 @@
 	return state.polarity;
 }
 
+static inline enum pwm_output_type pwm_get_output_type(
+		const struct pwm_device *pwm)
+{
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
+	return state.output_type;
+}
+
+static inline struct pwm_output_pattern *pwm_get_output_pattern(
+				struct pwm_device *pwm)
+{
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
+	return state.output_pattern;
+}
+
 static inline void pwm_get_args(const struct pwm_device *pwm,
 				struct pwm_args *args)
 {
@@ -243,10 +324,15 @@
  * @request: optional hook for requesting a PWM
  * @free: optional hook for freeing a PWM
  * @config: configure duty cycles and period length for this PWM
+ * @config_extend: configure duty cycles and period length for this
+ *	PWM with u64 data type
  * @set_polarity: configure the polarity of this PWM
  * @capture: capture and report PWM signal
  * @enable: enable PWM output toggling
  * @disable: disable PWM output toggling
+ * @get_output_type_supported: get the supported output type
+ * @set_output_type: set PWM output type
+ * @set_output_pattern: set the pattern for the modulated output
  * @apply: atomically apply a new PWM config. The state argument
  *	   should be adjusted with the real hardware config (if the
  *	   approximate the period or duty_cycle value, state should
@@ -262,12 +348,21 @@
 	void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
 	int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
 		      int duty_ns, int period_ns);
+	int (*config_extend)(struct pwm_chip *chip, struct pwm_device *pwm,
+		      u64 duty_ns, u64 period_ns);
 	int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
 			    enum pwm_polarity polarity);
 	int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm,
 		       struct pwm_capture *result, unsigned long timeout);
 	int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
 	void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
+	int (*get_output_type_supported)(struct pwm_chip *chip,
+			struct pwm_device *pwm);
+	int (*set_output_type)(struct pwm_chip *chip, struct pwm_device *pwm,
+			enum pwm_output_type output_type);
+	int (*set_output_pattern)(struct pwm_chip *chip,
+			struct pwm_device *pwm,
+			struct pwm_output_pattern *output_pattern);
 	int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm,
 		     struct pwm_state *state);
 	void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -309,8 +404,8 @@
  * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
  */
 struct pwm_capture {
-	unsigned int period;
-	unsigned int duty_cycle;
+	u64 period;
+	u64 duty_cycle;
 };
 
 #if IS_ENABLED(CONFIG_PWM)
@@ -321,6 +416,21 @@
 int pwm_adjust_config(struct pwm_device *pwm);
 
 /**
+ * pwm_get_output_type_supported() - obtain the output types supported
+ * @pwm: PWM device
+ *
+ * Returns: output types supported by the PWM device
+ */
+static inline int pwm_get_output_type_supported(struct pwm_device *pwm)
+{
+	if (pwm->chip->ops->get_output_type_supported)
+		return pwm->chip->ops->get_output_type_supported(pwm->chip,
+								 pwm);
+
+	return PWM_OUTPUT_FIXED;
+}
+
+/**
  * pwm_config() - change a PWM device configuration
  * @pwm: PWM device
  * @duty_ns: "on" time (in nanoseconds)
@@ -349,6 +459,31 @@
 }
 
 /**
+ * pwm_config_extend() - change PWM period and duty length with u64 data type
+ * @pwm: PWM device
+ * @duty_ns: "on" time (in nanoseconds)
+ * @period_ns: duration (in nanoseconds) of one cycle
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static inline int pwm_config_extend(struct pwm_device *pwm, u64 duty_ns,
+			     u64 period_ns)
+{
+	struct pwm_state state;
+
+	if (!pwm)
+		return -EINVAL;
+
+	pwm_get_state(pwm, &state);
+	if (state.duty_cycle == duty_ns && state.period == period_ns)
+		return 0;
+
+	state.duty_cycle = duty_ns;
+	state.period = period_ns;
+	return pwm_apply_state(pwm, &state);
+}
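+
+/*
+ * Example: a 10 s period does not fit the 32-bit nanosecond range
+ * (UINT_MAX ns is about 4.29 s), so the extended variant is required:
+ *
+ *	ret = pwm_config_extend(pwm, 2500000000ULL, 10000000000ULL);
+ */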
+
+/**
  * pwm_set_polarity() - configure the polarity of a PWM signal
  * @pwm: PWM device
  * @polarity: new polarity of the PWM signal
diff --git a/include/linux/sde_rsc.h b/include/linux/sde_rsc.h
index e1dc0cd..a409da1 100644
--- a/include/linux/sde_rsc.h
+++ b/include/linux/sde_rsc.h
@@ -277,13 +277,13 @@
 	return 0;
 }
 
-int sde_rsc_client_get_vsync_refcount(
+static inline int sde_rsc_client_get_vsync_refcount(
 		struct sde_rsc_client *caller_client)
 {
 	return 0;
 }
 
-int sde_rsc_client_reset_vsync_refcount(
+static inline int sde_rsc_client_reset_vsync_refcount(
 		struct sde_rsc_client *caller_client)
 {
 	return 0;
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h
index f4de336..bdf9047 100644
--- a/include/linux/soc/qcom/qmi.h
+++ b/include/linux/soc/qcom/qmi.h
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2018 The Linux Foundation. All rights reserved.
  * Copyright (c) 2017, Linaro Ltd.
  */
 #ifndef __QMI_HELPERS_H__
@@ -166,7 +166,7 @@
 struct qmi_txn {
 	struct qmi_handle *qmi;
 
-	int id;
+	u16 id;
 
 	struct mutex lock;
 	struct completion completion;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8e2c11e..4750036 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -178,6 +178,7 @@
 
 #define SWAP_CLUSTER_MAX 32UL
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
+#define SWAPFILE_CLUSTER	256
 
 #define SWAP_MAP_MAX	0x3e	/* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD	0x3f	/* Note pageblock is bad, in first swap_map */
@@ -273,6 +274,8 @@
 					 */
 	struct work_struct discard_work; /* discard worker */
 	struct swap_cluster_list discard_clusters; /* discard clusters list */
+	unsigned int write_pending;
+	unsigned int max_writes;
 };
 
 #ifdef CONFIG_64BIT
@@ -355,6 +358,8 @@
 						unsigned long *nr_scanned);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
+extern int sysctl_swap_ratio;
+extern int sysctl_swap_ratio_enable;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
 extern unsigned long vm_total_pages;
 
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index e06febf..d797307 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -8,9 +8,14 @@
  */
 extern spinlock_t swap_lock;
 extern struct plist_head swap_active_head;
+extern spinlock_t swap_avail_lock;
+extern struct plist_head *swap_avail_heads;
 extern struct swap_info_struct *swap_info[];
 extern int try_to_unuse(unsigned int, bool, unsigned long);
 extern unsigned long generic_max_swapfile_size(void);
 extern unsigned long max_swapfile_size(void);
+extern int swap_ratio(struct swap_info_struct **si, int node);
+extern void setup_swap_ratio(struct swap_info_struct *p, int prio);
+extern bool is_swap_ratio_group(int prio);
 
 #endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 55388ab..535f528 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -281,4 +281,5 @@
 		__tick_nohz_task_switch();
 }
 
+ktime_t *get_next_event_cpu(unsigned int cpu);
 #endif
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 7584cab..b4b39b2 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -46,6 +46,9 @@
 #define FUNC_SUSPEND_OPT_SUSP_MASK BIT(0)
 #define FUNC_SUSPEND_OPT_RW_EN_MASK BIT(1)
 
+#define FUNC_WAKEUP_CAPABLE_SHIFT  0
+#define FUNC_WAKEUP_ENABLE_SHIFT   1
+
 /*
  * USB function drivers should return USB_GADGET_DELAYED_STATUS if they
  * wish to delay the data/status stages of the control transfer till they
diff --git a/include/net/ip.h b/include/net/ip.h
index e44b1a4..a00f244 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -330,6 +330,8 @@
 
 __be32 inet_current_timestamp(void);
 
+extern int sysctl_reserved_port_bind;
+
 /* From inetpeer.c */
 extern int inet_peer_threshold;
 extern int inet_peer_minttl;
diff --git a/include/net/rmnet_config.h b/include/net/rmnet_config.h
new file mode 100644
index 0000000..01efc3b
--- /dev/null
+++ b/include/net/rmnet_config.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * Legacy RMNET Data MAP header definitions
+ */
+
+#ifndef _RMNET_CONFIG_H_
+#define _RMNET_CONFIG_H_
+
+struct rmnet_map_header_s {
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+	uint8_t  pad_len:6;
+	uint8_t  reserved_bit:1;
+	uint8_t  cd_bit:1;
+#else
+	uint8_t  cd_bit:1;
+	uint8_t  reserved_bit:1;
+	uint8_t  pad_len:6;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+	uint8_t  mux_id;
+	uint16_t pkt_len;
+} __aligned(1);
+
+#define RMNET_MAP_GET_MUX_ID(Y) \
+	(((struct rmnet_map_header_s *)(Y)->data)->mux_id)
+#define RMNET_MAP_GET_CD_BIT(Y) \
+	(((struct rmnet_map_header_s *)(Y)->data)->cd_bit)
+#define RMNET_MAP_GET_PAD(Y) \
+	(((struct rmnet_map_header_s *)(Y)->data)->pad_len)
+#define RMNET_MAP_GET_CMD_START(Y) \
+	((struct rmnet_map_control_command_s *) \
+	 ((Y)->data + sizeof(struct rmnet_map_header_s)))
+#define RMNET_MAP_GET_LENGTH(Y) \
+	(ntohs(((struct rmnet_map_header_s *)(Y)->data)->pkt_len))
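+
+/*
+ * Parsing sketch for a received MAP data frame (caller-side,
+ * illustrative). pkt_len counts the payload plus trailing pad, so the
+ * pad is subtracted to recover the IP packet length:
+ *
+ *	if (!RMNET_MAP_GET_CD_BIT(skb)) {
+ *		u8 mux = RMNET_MAP_GET_MUX_ID(skb);
+ *		u16 len = RMNET_MAP_GET_LENGTH(skb) -
+ *			  RMNET_MAP_GET_PAD(skb);
+ *	}
+ */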
+
+#endif /* _RMNET_CONFIG_H_ */
diff --git a/include/soc/qcom/cx_ipeak.h b/include/soc/qcom/cx_ipeak.h
new file mode 100644
index 0000000..8ebc113
--- /dev/null
+++ b/include/soc/qcom/cx_ipeak.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_CX_IPEAK_H
+#define __SOC_QCOM_CX_IPEAK_H
+
+struct device_node;
+struct cx_ipeak_client;
+
+#ifndef CONFIG_QCOM_CX_IPEAK
+
+static inline struct cx_ipeak_client *cx_ipeak_register(
+		struct device_node *dev_node,
+		const char *client_name)
+{
+	return NULL;
+}
+
+static inline void cx_ipeak_unregister(struct cx_ipeak_client *client)
+{
+}
+
+static inline int cx_ipeak_update(struct cx_ipeak_client *ipeak_client,
+			bool vote)
+{
+	return 0;
+}
+#else
+
+struct cx_ipeak_client *cx_ipeak_register(struct device_node *dev_node,
+		const char *client_name);
+void cx_ipeak_unregister(struct cx_ipeak_client *client);
+int cx_ipeak_update(struct cx_ipeak_client *ipeak_client, bool vote);
+
+#endif
+
+#endif /* __SOC_QCOM_CX_IPEAK_H */
diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h
new file mode 100644
index 0000000..7ca4c31
--- /dev/null
+++ b/include/soc/qcom/qmi_rmnet.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _QMI_RMNET_H
+#define _QMI_RMNET_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+struct qmi_rmnet_ps_ind {
+	void (*ps_on_handler)(void *port);
+	void (*ps_off_handler)(void *port);
+	struct list_head list;
+};
+
+
+#ifdef CONFIG_QCOM_QMI_RMNET
+void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
+void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt);
+void qmi_rmnet_enable_all_flows(struct net_device *dev);
+#else
+static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
+{
+}
+
+static inline void
+qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
+{
+}
+
+static inline void
+qmi_rmnet_enable_all_flows(struct net_device *dev)
+{
+}
+#endif
+
+#ifdef CONFIG_QCOM_QMI_DFC
+void *qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id);
+void qmi_rmnet_qos_exit(struct net_device *dev, void *qos);
+void qmi_rmnet_burst_fc_check(struct net_device *dev,
+			      int ip_type, u32 mark, unsigned int len);
+int qmi_rmnet_get_queue(struct net_device *dev, struct sk_buff *skb);
+#else
+static inline void *
+qmi_rmnet_qos_init(struct net_device *real_dev, u8 mux_id)
+{
+	return NULL;
+}
+
+static inline void qmi_rmnet_qos_exit(struct net_device *dev, void *qos)
+{
+}
+
+static inline void
+qmi_rmnet_burst_fc_check(struct net_device *dev,
+			 int ip_type, u32 mark, unsigned int len)
+{
+}
+
+static inline int qmi_rmnet_get_queue(struct net_device *dev,
+				       struct sk_buff *skb)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
+int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable);
+void qmi_rmnet_work_init(void *port);
+void qmi_rmnet_work_exit(void *port);
+void qmi_rmnet_work_maybe_restart(void *port);
+void qmi_rmnet_work_restart(void *port);
+
+int qmi_rmnet_ps_ind_register(void *port,
+			      struct qmi_rmnet_ps_ind *ps_ind);
+int qmi_rmnet_ps_ind_deregister(void *port,
+				struct qmi_rmnet_ps_ind *ps_ind);
+void qmi_rmnet_ps_off_notify(void *port);
+void qmi_rmnet_ps_on_notify(void *port);
+
+#else
+static inline int qmi_rmnet_set_powersave_mode(void *port, uint8_t enable)
+{
+	return 0;
+}
+
+static inline void qmi_rmnet_work_init(void *port)
+{
+}
+
+static inline void qmi_rmnet_work_restart(void *port)
+{
+}
+
+static inline void qmi_rmnet_work_exit(void *port)
+{
+}
+
+static inline void qmi_rmnet_work_maybe_restart(void *port)
+{
+}
+
+static inline int qmi_rmnet_ps_ind_register(void *port,
+					    struct qmi_rmnet_ps_ind *ps_ind)
+{
+	return 0;
+}
+
+static inline int qmi_rmnet_ps_ind_deregister(void *port,
+					      struct qmi_rmnet_ps_ind *ps_ind)
+{
+	return 0;
+}
+
+static inline void qmi_rmnet_ps_off_notify(void *port)
+{
+}
+
+static inline void qmi_rmnet_ps_on_notify(void *port)
+{
+}
+#endif
+#endif /*_QMI_RMNET_H*/
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
new file mode 100644
index 0000000..0cd5628
--- /dev/null
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _RMNET_QMI_H
+#define _RMNET_QMI_H
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+
+void rmnet_map_tx_qmap_cmd(struct sk_buff *qmap_skb);
+
+#ifdef CONFIG_QCOM_QMI_RMNET
+void *rmnet_get_qmi_pt(void *port);
+void *rmnet_get_qos_pt(struct net_device *dev);
+void *rmnet_get_rmnet_port(struct net_device *dev);
+struct net_device *rmnet_get_rmnet_dev(void *port, u8 mux_id);
+void rmnet_reset_qmi_pt(void *port);
+void rmnet_init_qmi_pt(void *port, void *qmi);
+void rmnet_enable_all_flows(void *port);
+void rmnet_set_powersave_format(void *port);
+void rmnet_clear_powersave_format(void *port);
+void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
+int rmnet_get_powersave_notif(void *port);
+#else
+static inline void *rmnet_get_qmi_pt(void *port)
+{
+	return NULL;
+}
+
+static inline void *rmnet_get_qos_pt(struct net_device *dev)
+{
+	return NULL;
+}
+
+static inline void *rmnet_get_rmnet_port(struct net_device *dev)
+{
+	return NULL;
+}
+
+static inline struct net_device *rmnet_get_rmnet_dev(void *port,
+						     u8 mux_id)
+{
+	return NULL;
+}
+
+static inline void rmnet_reset_qmi_pt(void *port)
+{
+}
+
+static inline void rmnet_init_qmi_pt(void *port, void *qmi)
+{
+}
+
+static inline void rmnet_enable_all_flows(void *port)
+{
+}
+
+static inline void rmnet_set_powersave_format(void *port)
+{
+}
+
+static inline void rmnet_clear_powersave_format(void *port)
+{
+}
+
+static inline void rmnet_get_packets(void *port, u64 *rx, u64 *tx)
+{
+}
+
+static inline int rmnet_get_powersave_notif(void *port)
+{
+	return 0;
+}
+
+#endif /* CONFIG_QCOM_QMI_RMNET */
+#endif /*_RMNET_QMI_H*/
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
new file mode 100644
index 0000000..9fa33b34
--- /dev/null
+++ b/include/trace/events/dfc.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dfc
+
+#if !defined(_TRACE_DFC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DFC_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(dfc_qmi_tc,
+
+	TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, u32 grant,
+		 int qlen, u32 tcm_handle, int enable),
+
+	TP_ARGS(name, bearer_id, flow_id, grant, qlen, tcm_handle, enable),
+
+	TP_STRUCT__entry(
+		__string(dev_name, name)
+		__field(u8, bid)
+		__field(u32, fid)
+		__field(u32, grant)
+		__field(int, qlen)
+		__field(u32, tcm_handle)
+		__field(int, enable)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, name);
+		__entry->bid = bearer_id;
+		__entry->fid = flow_id;
+		__entry->grant = grant;
+		__entry->qlen = qlen;
+		__entry->tcm_handle = tcm_handle;
+		__entry->enable = enable;
+	),
+
+	TP_printk("dev=%s bearer_id=%u grant=%u len=%d flow_id=%u q=%d %s",
+		__get_str(dev_name),
+		__entry->bid, __entry->grant, __entry->qlen, __entry->fid,
+		__entry->tcm_handle,
+		__entry->enable ? "enable" : "disable")
+);
+
+TRACE_EVENT(dfc_flow_ind,
+
+	TP_PROTO(int src, int idx, u8 mux_id, u8 bearer_id, u32 grant,
+		 u16 seq_num, u8 ack_req, u32 ancillary),
+
+	TP_ARGS(src, idx, mux_id, bearer_id, grant, seq_num, ack_req,
+		ancillary),
+
+	TP_STRUCT__entry(
+		__field(int, src)
+		__field(int, idx)
+		__field(u8, mid)
+		__field(u8, bid)
+		__field(u32, grant)
+		__field(u16, seq)
+		__field(u8, ack_req)
+		__field(u32, ancillary)
+	),
+
+	TP_fast_assign(
+		__entry->src = src;
+		__entry->idx = idx;
+		__entry->mid = mux_id;
+		__entry->bid = bearer_id;
+		__entry->grant = grant;
+		__entry->seq = seq_num;
+		__entry->ack_req = ack_req;
+		__entry->ancillary = ancillary;
+	),
+
+	TP_printk("src=%d [%d]: mid=%u bid=%u grant=%u seq=%u ack=%u anc=%u",
+		__entry->src, __entry->idx, __entry->mid, __entry->bid,
+		__entry->grant, __entry->seq, __entry->ack_req,
+		__entry->ancillary)
+);
+
+TRACE_EVENT(dfc_flow_check,
+
+	TP_PROTO(const char *name, u8 bearer_id, unsigned int len, u32 grant),
+
+	TP_ARGS(name, bearer_id, len, grant),
+
+	TP_STRUCT__entry(
+		__string(dev_name, name)
+		__field(u8, bearer_id)
+		__field(unsigned int, len)
+		__field(u32, grant)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, name);
+		__entry->bearer_id = bearer_id;
+		__entry->len = len;
+		__entry->grant = grant;
+	),
+
+	TP_printk("dev=%s bearer_id=%u skb_len=%u current_grant=%u",
+		__get_str(dev_name),
+		__entry->bearer_id, __entry->len, __entry->grant)
+);
+
+TRACE_EVENT(dfc_flow_info,
+
+	TP_PROTO(const char *name, u8 bearer_id, u32 flow_id, int ip_type,
+		 u32 handle, int add),
+
+	TP_ARGS(name, bearer_id, flow_id, ip_type, handle, add),
+
+	TP_STRUCT__entry(
+		__string(dev_name, name)
+		__field(u8, bid)
+		__field(u32, fid)
+		__field(int, ip)
+		__field(u32, handle)
+		__field(int, action)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, name);
+		__entry->bid = bearer_id;
+		__entry->fid = flow_id;
+		__entry->ip = ip_type;
+		__entry->handle = handle;
+		__entry->action = add;
+	),
+
+	TP_printk("%s: dev=%s bearer_id=%u flow_id=%u ip_type=%d q=%d",
+		__entry->action ? "add flow" : "delete flow",
+		__get_str(dev_name),
+		__entry->bid, __entry->fid, __entry->ip, __entry->handle)
+);
+
+TRACE_EVENT(dfc_client_state_up,
+
+	TP_PROTO(int idx, u32 instance, u32 ep_type, u32 iface),
+
+	TP_ARGS(idx, instance, ep_type, iface),
+
+	TP_STRUCT__entry(
+		__field(int, idx)
+		__field(u32, instance)
+		__field(u32, ep_type)
+		__field(u32, iface)
+	),
+
+	TP_fast_assign(
+		__entry->idx = idx;
+		__entry->instance = instance;
+		__entry->ep_type = ep_type;
+		__entry->iface = iface;
+	),
+
+	TP_printk("DFC Client[%d] connect: instance=%u ep_type=%u iface_id=%u",
+		__entry->idx, __entry->instance,
+		__entry->ep_type, __entry->iface)
+);
+
+TRACE_EVENT(dfc_client_state_down,
+
+	TP_PROTO(int idx, int from_cb),
+
+	TP_ARGS(idx, from_cb),
+
+	TP_STRUCT__entry(
+		__field(int, idx)
+		__field(int, from_cb)
+	),
+
+	TP_fast_assign(
+		__entry->idx = idx;
+		__entry->from_cb = from_cb;
+	),
+
+	TP_printk("DFC Client[%d] exit: callback %d",
+		  __entry->idx, __entry->from_cb)
+);
+
+TRACE_EVENT(dfc_qmap_cmd,
+
+	TP_PROTO(u8 mux_id, u8 bearer_id, u16 seq_num, u8 type, u32 tran),
+
+	TP_ARGS(mux_id, bearer_id, seq_num, type, tran),
+
+	TP_STRUCT__entry(
+		__field(u8, mid)
+		__field(u8, bid)
+		__field(u16, seq)
+		__field(u8, type)
+		__field(u32, tran)
+	),
+
+	TP_fast_assign(
+		__entry->mid = mux_id;
+		__entry->bid = bearer_id;
+		__entry->seq = seq_num;
+		__entry->type = type;
+		__entry->tran = tran;
+	),
+
+	TP_printk("mux_id=%u bearer_id=%u seq_num=%u type=%u tran=%u",
+		__entry->mid, __entry->bid, __entry->seq,
+		__entry->type, __entry->tran)
+);
+
+#endif /* _TRACE_DFC_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/rmnet.h b/include/trace/events/rmnet.h
new file mode 100644
index 0000000..d58f5c6
--- /dev/null
+++ b/include/trace/events/rmnet.h
@@ -0,0 +1,476 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rmnet
+
+#if !defined(_TRACE_RMNET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RMNET_H
+
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+/*****************************************************************************/
+/* Trace events for rmnet module */
+/*****************************************************************************/
+TRACE_EVENT(rmnet_xmit_skb,
+
+	TP_PROTO(struct sk_buff *skb),
+
+	TP_ARGS(skb),
+
+	TP_STRUCT__entry(
+		__string(dev_name, skb->dev->name)
+		__field(unsigned int, len)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, skb->dev->name);
+		__entry->len = skb->len;
+	),
+
+	TP_printk("dev_name=%s len=%u", __get_str(dev_name), __entry->len)
+);
+
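+/*
+ * Usage sketch (standard tracepoint boilerplate): exactly one translation
+ * unit in the module defines CREATE_TRACE_POINTS before including this
+ * header, after which the trace_*() calls become available.
+ *
+ *	#define CREATE_TRACE_POINTS
+ *	#include <trace/events/rmnet.h>
+ *
+ *	// in the transmit path:
+ *	trace_rmnet_xmit_skb(skb);	// logs skb->dev->name and skb->len
+ */
+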
+TRACE_EVENT(rmnet_low,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_high,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_err,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+/*****************************************************************************/
+/* Trace events for rmnet_perf module */
+/*****************************************************************************/
+TRACE_EVENT(rmnet_perf_low,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_perf_high,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_perf_err,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+/*****************************************************************************/
+/* Trace events for rmnet_shs module */
+/*****************************************************************************/
+TRACE_EVENT(rmnet_shs_low,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_shs_high,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_shs_err,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_shs_wq_low,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_shs_wq_high,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+TRACE_EVENT(rmnet_shs_wq_err,
+
+	TP_PROTO(u8 func, u8 evt, u32 uint1, u32 uint2,
+		 u64 ulong1, u64 ulong2, void *ptr1, void *ptr2),
+
+	TP_ARGS(func, evt, uint1, uint2, ulong1, ulong2, ptr1, ptr2),
+
+	TP_STRUCT__entry(
+		__field(u8, func)
+		__field(u8, evt)
+		__field(u32, uint1)
+		__field(u32, uint2)
+		__field(u64, ulong1)
+		__field(u64, ulong2)
+		__field(void *, ptr1)
+		__field(void *, ptr2)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+		__entry->evt = evt;
+		__entry->uint1 = uint1;
+		__entry->uint2 = uint2;
+		__entry->ulong1 = ulong1;
+		__entry->ulong2 = ulong2;
+		__entry->ptr1 = ptr1;
+		__entry->ptr2 = ptr2;
+	),
+
+	TP_printk("fun:%u ev:%u u1:%u u2:%u ul1:%lu ul2:%lu p1:0x%pK p2:0x%pK",
+		  __entry->func, __entry->evt,
+		  __entry->uint1, __entry->uint2,
+		  __entry->ulong1, __entry->ulong2,
+		  __entry->ptr1, __entry->ptr2)
+);
+
+#endif /* _TRACE_RMNET_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 9e92f22..950dede 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -51,6 +51,33 @@
 	TP_printk("skbaddr=%p", __entry->skbaddr)
 );
 
+TRACE_EVENT(print_skb_gso,
+
+	TP_PROTO(struct sk_buff *skb, __be16 src, __be16 dest),
+
+	TP_ARGS(skb, src, dest),
+
+	TP_STRUCT__entry(
+		__field(void *,	skbaddr)
+		__field(int, len)
+		__field(int, data_len)
+		__field(__be16, src)
+		__field(__be16, dest)
+	),
+
+	TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__entry->data_len = skb->data_len;
+		__entry->src = src;
+		__entry->dest = dest;
+	),
+
+	TP_printk("GSO: skbaddr=%pK, len=%d, data_len=%d, src=%u, dest=%u",
+		__entry->skbaddr, __entry->len, __entry->data_len,
+		be16_to_cpu(__entry->src), be16_to_cpu(__entry->dest))
+);
+
 TRACE_EVENT(skb_copy_datagram_iovec,
 
 	TP_PROTO(const struct sk_buff *skb, int len),
diff --git a/include/trace/events/wda.h b/include/trace/events/wda.h
new file mode 100644
index 0000000..15b5795
--- /dev/null
+++ b/include/trace/events/wda.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wda
+
+#if !defined(_TRACE_WDA_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WDA_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(wda_set_powersave_mode,
+
+	TP_PROTO(int enable),
+
+	TP_ARGS(enable),
+
+	TP_STRUCT__entry(
+		__field(int, enable)
+	),
+
+	TP_fast_assign(
+		__entry->enable = enable;
+	),
+
+	TP_printk("set powersave mode to %s",
+		__entry->enable ? "enable" : "disable")
+);
+
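+/*
+ * Illustrative call site (sketch; wda_send_powersave_req() is an assumed
+ * helper, not a real symbol): trace the requested state before issuing
+ * the QMI request so failures can be correlated with the request.
+ *
+ *	trace_wda_set_powersave_mode(enable);
+ *	rc = wda_send_powersave_req(enable);
+ */
+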
+TRACE_EVENT(wda_client_state_up,
+
+	TP_PROTO(u32 instance, u32 ep_type, u32 iface),
+
+	TP_ARGS(instance, ep_type, iface),
+
+	TP_STRUCT__entry(
+		__field(u32, instance)
+		__field(u32, ep_type)
+		__field(u32, iface)
+	),
+
+	TP_fast_assign(
+		__entry->instance = instance;
+		__entry->ep_type = ep_type;
+		__entry->iface = iface;
+	),
+
+	TP_printk("Client: Connected with WDA instance=%u ep_type=%u i_id=%u",
+		__entry->instance, __entry->ep_type, __entry->iface)
+);
+
+TRACE_EVENT(wda_client_state_down,
+
+	TP_PROTO(int from_cb),
+
+	TP_ARGS(from_cb),
+
+	TP_STRUCT__entry(
+		__field(int, from_cb)
+	),
+
+	TP_fast_assign(
+		__entry->from_cb = from_cb;
+	),
+
+	TP_printk("Client: Connection with WDA lost Exit by callback %d",
+		  __entry->from_cb)
+);
+
+#endif /* _TRACE_WDA_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 721ab7e..25193b3 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -312,6 +312,30 @@
  */
 #define DRM_FORMAT_MOD_QCOM_COMPRESSED	fourcc_mod_code(QCOM, 1)
 
+/*
+ * QTI DX Format
+ *
+ * Refers to a DX variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_DX	fourcc_mod_code(QCOM, 0x2)
+
+/*
+ * QTI Tight Format
+ *
+ * Refers to a tightly packed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TIGHT	fourcc_mod_code(QCOM, 0x4)
+
+/*
+ * QTI Tile Format
+ *
+ * Refers to a tile variant of the base format.
+ * Implementation may be platform and base-format specific.
+ */
+#define DRM_FORMAT_MOD_QCOM_TILE	fourcc_mod_code(QCOM, 0x8)
+
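+/*
+ * Illustrative user-space sketch (assumes libdrm's drmIoctl() and a base
+ * format that supports the modifier on the target platform): a modifier
+ * is supplied per plane through drm_mode_fb_cmd2 when the
+ * DRM_MODE_FB_MODIFIERS flag is set.
+ *
+ *	struct drm_mode_fb_cmd2 f = {0};
+ *
+ *	f.width = width;
+ *	f.height = height;
+ *	f.pixel_format = DRM_FORMAT_NV12;
+ *	f.flags = DRM_MODE_FB_MODIFIERS;
+ *	f.handles[0] = bo_handle;
+ *	f.pitches[0] = stride;
+ *	f.modifier[0] = DRM_FORMAT_MOD_QCOM_COMPRESSED;
+ *	drmIoctl(fd, DRM_IOCTL_MODE_ADDFB2, &f);
+ */
+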
 /* Vivante framebuffer modifiers */
 
 /*
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 8d67243..75c567b 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -116,6 +116,12 @@
 #define  DRM_MODE_FLAG_PIC_AR_256_135 \
 			(DRM_MODE_PICTURE_ASPECT_256_135<<19)
 
+#define  DRM_MODE_FLAG_SUPPORTS_RGB		(1<<23)
+
+#define  DRM_MODE_FLAG_SUPPORTS_YUV		(1<<24)
+
+#define  DRM_MODE_FLAG_SEAMLESS			(1<<31)
+
 #define  DRM_MODE_FLAG_ALL	(DRM_MODE_FLAG_PHSYNC |		\
 				 DRM_MODE_FLAG_NHSYNC |		\
 				 DRM_MODE_FLAG_PVSYNC |		\
@@ -475,6 +481,7 @@
 
 #define DRM_MODE_FB_INTERLACED	(1<<0) /* for interlaced framebuffers */
 #define DRM_MODE_FB_MODIFIERS	(1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_SECURE	(1<<2) /* for secure framebuffers */
 
 struct drm_mode_fb_cmd2 {
 	__u32 fb_id;
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index c06d0a5..bede892 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -26,6 +26,7 @@
 #define __MSM_DRM_H__
 
 #include "drm.h"
+#include "sde_drm.h"
 
 #if defined(__cplusplus)
 extern "C" {
@@ -67,6 +68,50 @@
 	__s64 tv_nsec;         /* nanoseconds */
 };
 
+/*
+ * HDR Metadata
+ * These are defined as per the EDID spec and are used by userspace
+ * to set the HDR metadata on the sink for playback.
+ */
+
+#define HDR_PRIMARIES_COUNT   3
+
+/* HDR EOTF */
+#define HDR_EOTF_SDR_LUM_RANGE	0x0
+#define HDR_EOTF_HDR_LUM_RANGE	0x1
+#define HDR_EOTF_SMTPE_ST2084	0x2
+#define HDR_EOTF_HLG		0x3
+
+#define DRM_MSM_EXT_HDR_METADATA
+struct drm_msm_ext_hdr_metadata {
+	__u32 hdr_state;        /* HDR state */
+	__u32 eotf;             /* electro optical transfer function */
+	__u32 hdr_supported;    /* HDR supported */
+	__u32 display_primaries_x[HDR_PRIMARIES_COUNT]; /* Primaries x */
+	__u32 display_primaries_y[HDR_PRIMARIES_COUNT]; /* Primaries y */
+	__u32 white_point_x;    /* white_point_x */
+	__u32 white_point_y;    /* white_point_y */
+	__u32 max_luminance;    /* Max luminance */
+	__u32 min_luminance;    /* Min Luminance */
+	__u32 max_content_light_level; /* max content light level */
+	__u32 max_average_light_level; /* max average light level */
+};
+
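+/*
+ * Illustrative fill-in (values are examples resembling typical HDR10
+ * content metadata, not normative; units follow the sink's EDID
+ * convention): userspace populates this blob and hands it to the driver
+ * through the corresponding connector property.
+ *
+ *	struct drm_msm_ext_hdr_metadata md = {0};
+ *
+ *	md.hdr_supported = 1;
+ *	md.eotf = HDR_EOTF_SMTPE_ST2084;
+ *	md.max_luminance = 1000;		// example mastering peak
+ *	md.min_luminance = 50;			// example mastering floor
+ *	md.max_content_light_level = 1000;	// MaxCLL
+ *	md.max_average_light_level = 400;	// MaxFALL
+ */
+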
+/**
+ * HDR sink properties
+ * These are defined as per the EDID spec and are used by userspace
+ * to determine the HDR properties supported by the sink.
+ */
+#define DRM_MSM_EXT_HDR_PROPERTIES
+struct drm_msm_ext_hdr_properties {
+	__u8 hdr_metadata_type_one;   /* static metadata type one */
+	__u32 hdr_supported;          /* HDR supported */
+	__u32 hdr_eotf;               /* electro optical transfer function */
+	__u32 hdr_max_luminance;      /* Max luminance */
+	__u32 hdr_avg_luminance;      /* Avg luminance */
+	__u32 hdr_min_luminance;      /* Min Luminance */
+};
+
 #define MSM_PARAM_GPU_ID     0x01
 #define MSM_PARAM_GMEM_SIZE  0x02
 #define MSM_PARAM_CHIP_ID    0x03
@@ -259,6 +304,63 @@
 	__u32 retained;       /* out, whether backing store still exists */
 };
 
+/* HDR WRGB x and y index */
+#define DISPLAY_PRIMARIES_WX 0
+#define DISPLAY_PRIMARIES_WY 1
+#define DISPLAY_PRIMARIES_RX 2
+#define DISPLAY_PRIMARIES_RY 3
+#define DISPLAY_PRIMARIES_GX 4
+#define DISPLAY_PRIMARIES_GY 5
+#define DISPLAY_PRIMARIES_BX 6
+#define DISPLAY_PRIMARIES_BY 7
+#define DISPLAY_PRIMARIES_MAX 8
+
+struct drm_panel_hdr_properties {
+	__u32 hdr_enabled;
+
+	/* WRGB X and y values arrayed in format */
+	/* [WX, WY, RX, RY, GX, GY, BX, BY] */
+	__u32 display_primaries[DISPLAY_PRIMARIES_MAX];
+
+	/* peak brightness supported by panel */
+	__u32 peak_brightness;
+	/* Blackness level supported by panel */
+	__u32 blackness_level;
+};
+
+/**
+ * struct drm_msm_event_req - Payload to event enable/disable ioctls.
+ * @object_id: DRM object id. e.g.: for crtc pass crtc id.
+ * @object_type: DRM object type. e.g.: for crtc set it to DRM_MODE_OBJECT_CRTC.
+ * @event: Event for which notification is being enabled/disabled.
+ *         e.g.: for Histogram set - DRM_EVENT_HISTOGRAM.
+ * @client_context: Opaque pointer that will be returned during event response
+ *                  notification.
+ * @index: Object index (e.g. crtc index), optional for user-space to set.
+ *         Driver will override value based on object_id and object_type.
+ */
+struct drm_msm_event_req {
+	__u32 object_id;
+	__u32 object_type;
+	__u32 event;
+	__u64 client_context;
+	__u32 index;
+};
+
+/**
+ * struct drm_msm_event_resp - payload returned when read is called for
+ *                            custom notifications.
+ * @base: Event type and length of complete notification payload.
+ * @info: Information about the DRM object which raised this event.
+ * @data: Custom payload that driver returns for event type.
+ *        size of data = base.length - (sizeof(base) + sizeof(info))
+ */
+struct drm_msm_event_resp {
+	struct drm_event base;
+	struct drm_msm_event_req info;
+	__u8 data[];
+};
+
 /*
  * Draw queues allow the user to set specific submission parameter. Command
  * submissions specify a specific submitqueue to use.  ID 0 is reserved for
@@ -289,6 +391,20 @@
  */
 #define DRM_MSM_SUBMITQUEUE_NEW        0x0A
 #define DRM_MSM_SUBMITQUEUE_CLOSE      0x0B
+#define DRM_SDE_WB_CONFIG              0x40
+#define DRM_MSM_REGISTER_EVENT         0x41
+#define DRM_MSM_DEREGISTER_EVENT       0x42
+#define DRM_MSM_RMFB2                  0x43
+
+/* sde custom events */
+#define DRM_EVENT_HISTOGRAM 0x80000000
+#define DRM_EVENT_AD_BACKLIGHT 0x80000001
+#define DRM_EVENT_CRTC_POWER 0x80000002
+#define DRM_EVENT_SYS_BACKLIGHT 0x80000003
+#define DRM_EVENT_SDE_POWER 0x80000004
+#define DRM_EVENT_IDLE_NOTIFY 0x80000005
+#define DRM_EVENT_PANEL_DEAD 0x80000006 /* ESD event */
+#define DRM_EVENT_SDE_HW_RECOVERY 0x80000007
 
 #define DRM_IOCTL_MSM_GET_PARAM        DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW          DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -298,6 +414,14 @@
 #define DRM_IOCTL_MSM_GEM_SUBMIT       DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
 #define DRM_IOCTL_MSM_WAIT_FENCE       DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
 #define DRM_IOCTL_MSM_GEM_MADVISE      DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
+#define DRM_IOCTL_SDE_WB_CONFIG \
+	DRM_IOW((DRM_COMMAND_BASE + DRM_SDE_WB_CONFIG), struct sde_drm_wb_cfg)
+#define DRM_IOCTL_MSM_REGISTER_EVENT   DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_REGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_DEREGISTER_EVENT DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_DEREGISTER_EVENT), struct drm_msm_event_req)
+#define DRM_IOCTL_MSM_RMFB2 DRM_IOW((DRM_COMMAND_BASE + \
+			DRM_MSM_RMFB2), unsigned int)
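+
+/*
+ * Illustrative registration flow (sketch; assumes an open DRM fd and a
+ * valid crtc id, error handling omitted). Responses arrive on the same fd
+ * and are read back as struct drm_msm_event_resp records.
+ *
+ *	struct drm_msm_event_req req = {0};
+ *	char buf[256];
+ *
+ *	req.object_id = crtc_id;
+ *	req.object_type = DRM_MODE_OBJECT_CRTC;
+ *	req.event = DRM_EVENT_CRTC_POWER;
+ *	drmIoctl(fd, DRM_IOCTL_MSM_REGISTER_EVENT, &req);
+ *
+ *	read(fd, buf, sizeof(buf));	// blocks until an event fires
+ *	// ((struct drm_msm_event_resp *)buf)->data holds the payload
+ */
+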
 #define DRM_IOCTL_MSM_SUBMITQUEUE_NEW    DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
 #define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE  DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
 
diff --git a/include/uapi/drm/msm_drm_pp.h b/include/uapi/drm/msm_drm_pp.h
index f0d13a5..cf22867 100644
--- a/include/uapi/drm/msm_drm_pp.h
+++ b/include/uapi/drm/msm_drm_pp.h
@@ -441,4 +441,23 @@
 	__u32 matrix[DITHER_MATRIX_SZ];
 };
 
+/**
+ * struct drm_msm_ad4_roi_cfg - ad4 roi params config set
+ * by user-space client.
+ * @h_x - horizontal direction start
+ * @h_y - horizontal direction end
+ * @v_x - vertical direction start
+ * @v_y - vertical direction end
+ * @factor_in - the alpha value for inside roi region
+ * @factor_out - the alpha value for outside roi region
+ */
+#define DRM_MSM_AD4_ROI
+struct drm_msm_ad4_roi_cfg {
+	__u32 h_x;
+	__u32 h_y;
+	__u32 v_x;
+	__u32 v_y;
+	__u32 factor_in;
+	__u32 factor_out;
+};
 #endif /* _MSM_DRM_PP_H_ */
diff --git a/include/uapi/drm/sde_drm.h b/include/uapi/drm/sde_drm.h
index 8753b76..670a03b 100644
--- a/include/uapi/drm/sde_drm.h
+++ b/include/uapi/drm/sde_drm.h
@@ -245,7 +245,10 @@
  * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
  * @uv_sep_lut_idx:    UV separable LUT index
  * @de:                Detail enhancer settings
+ * @dir_weight:        Directional Weight
+ * @unsharp_mask_blend: Unsharp Blend Filter Ratio
  */
+#define SDE_DRM_QSEED3LITE
 struct sde_drm_scaler_v2 {
 	/*
 	 * General definitions
@@ -298,6 +301,8 @@
 	 * Detail enhancer settings
 	 */
 	struct sde_drm_de_v1 de;
+	uint32_t dir_weight;
+	uint32_t unsharp_mask_blend;
 };
 
 /* Number of dest scalers supported */
@@ -451,4 +456,11 @@
 #define SDE_MODE_DPMS_SUSPEND	4
 #define SDE_MODE_DPMS_OFF	5
 
+/**
+ * sde recovery events for notifying client
+ */
+#define SDE_RECOVERY_SUCCESS		0
+#define SDE_RECOVERY_CAPTURE		1
+#define SDE_RECOVERY_HARD_RESET		2
+
 #endif /* _SDE_DRM_H_ */
diff --git a/include/uapi/linux/msm_rmnet.h b/include/uapi/linux/msm_rmnet.h
new file mode 100644
index 0000000..0a494f3
--- /dev/null
+++ b/include/uapi/linux/msm_rmnet.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_MSM_RMNET_H_
+#define _UAPI_MSM_RMNET_H_
+
+/* Bitmap macros for RmNET driver operation mode. */
+#define RMNET_MODE_NONE     (0x00)
+#define RMNET_MODE_LLP_ETH  (0x01)
+#define RMNET_MODE_LLP_IP   (0x02)
+#define RMNET_MODE_QOS      (0x04)
+#define RMNET_MODE_MASK     (RMNET_MODE_LLP_ETH | \
+			     RMNET_MODE_LLP_IP  | \
+			     RMNET_MODE_QOS)
+
+#define RMNET_IS_MODE_QOS(mode)  \
+	((mode & RMNET_MODE_QOS) == RMNET_MODE_QOS)
+#define RMNET_IS_MODE_IP(mode)   \
+	((mode & RMNET_MODE_LLP_IP) == RMNET_MODE_LLP_IP)
+
+/* IOCTL commands
+ * Values chosen to not conflict with other drivers in the ecosystem
+ */
+
+#define RMNET_IOCTL_SET_LLP_ETHERNET 0x000089F1 /* Set Ethernet protocol  */
+#define RMNET_IOCTL_SET_LLP_IP       0x000089F2 /* Set RAWIP protocol     */
+#define RMNET_IOCTL_GET_LLP          0x000089F3 /* Get link protocol      */
+#define RMNET_IOCTL_SET_QOS_ENABLE   0x000089F4 /* Set QoS header enabled */
+#define RMNET_IOCTL_SET_QOS_DISABLE  0x000089F5 /* Set QoS header disabled*/
+#define RMNET_IOCTL_GET_QOS          0x000089F6 /* Get QoS header state   */
+#define RMNET_IOCTL_GET_OPMODE       0x000089F7 /* Get operation mode     */
+#define RMNET_IOCTL_OPEN             0x000089F8 /* Open transport port    */
+#define RMNET_IOCTL_CLOSE            0x000089F9 /* Close transport port   */
+#define RMNET_IOCTL_FLOW_ENABLE      0x000089FA /* Flow enable            */
+#define RMNET_IOCTL_FLOW_DISABLE     0x000089FB /* Flow disable           */
+#define RMNET_IOCTL_FLOW_SET_HNDL    0x000089FC /* Set flow handle        */
+#define RMNET_IOCTL_EXTENDED         0x000089FD /* Extended IOCTLs        */
+
+/* RmNet Data Required IOCTLs */
+#define RMNET_IOCTL_GET_SUPPORTED_FEATURES     0x0000   /* Get features    */
+#define RMNET_IOCTL_SET_MRU                    0x0001   /* Set MRU         */
+#define RMNET_IOCTL_GET_MRU                    0x0002   /* Get MRU         */
+#define RMNET_IOCTL_GET_EPID                   0x0003   /* Get endpoint ID */
+#define RMNET_IOCTL_GET_DRIVER_NAME            0x0004   /* Get driver name */
+#define RMNET_IOCTL_ADD_MUX_CHANNEL            0x0005   /* Add MUX ID      */
+#define RMNET_IOCTL_SET_EGRESS_DATA_FORMAT     0x0006   /* Set EDF         */
+#define RMNET_IOCTL_SET_INGRESS_DATA_FORMAT    0x0007   /* Set IDF         */
+#define RMNET_IOCTL_SET_AGGREGATION_COUNT      0x0008   /* Set agg count   */
+#define RMNET_IOCTL_GET_AGGREGATION_COUNT      0x0009   /* Get agg count   */
+#define RMNET_IOCTL_SET_AGGREGATION_SIZE       0x000A   /* Set agg size    */
+#define RMNET_IOCTL_GET_AGGREGATION_SIZE       0x000B   /* Get agg size    */
+#define RMNET_IOCTL_FLOW_CONTROL               0x000C   /* Do flow control */
+#define RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL   0x000D   /* For legacy use  */
+#define RMNET_IOCTL_GET_HWSW_MAP               0x000E   /* Get HW/SW map   */
+#define RMNET_IOCTL_SET_RX_HEADROOM            0x000F   /* RX Headroom     */
+#define RMNET_IOCTL_GET_EP_PAIR                0x0010   /* Endpoint pair   */
+#define RMNET_IOCTL_SET_QOS_VERSION            0x0011   /* 8/6 byte QoS hdr*/
+#define RMNET_IOCTL_GET_QOS_VERSION            0x0012   /* 8/6 byte QoS hdr*/
+#define RMNET_IOCTL_GET_SUPPORTED_QOS_MODES    0x0013   /* Get QoS modes   */
+#define RMNET_IOCTL_SET_SLEEP_STATE            0x0014   /* Set sleep state */
+#define RMNET_IOCTL_SET_XLAT_DEV_INFO          0x0015   /* xlat dev name   */
+#define RMNET_IOCTL_DEREGISTER_DEV             0x0016   /* Dereg a net dev */
+#define RMNET_IOCTL_GET_SG_SUPPORT             0x0017   /* Query sg support*/
+
+/* Return values for the RMNET_IOCTL_GET_SUPPORTED_FEATURES IOCTL */
+#define RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL              (1<<0)
+#define RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT          (1<<1)
+#define RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT         (1<<2)
+#define RMNET_IOCTL_FEAT_SET_AGGREGATION_COUNT           (1<<3)
+#define RMNET_IOCTL_FEAT_GET_AGGREGATION_COUNT           (1<<4)
+#define RMNET_IOCTL_FEAT_SET_AGGREGATION_SIZE            (1<<5)
+#define RMNET_IOCTL_FEAT_GET_AGGREGATION_SIZE            (1<<6)
+#define RMNET_IOCTL_FEAT_FLOW_CONTROL                    (1<<7)
+#define RMNET_IOCTL_FEAT_GET_DFLT_CONTROL_CHANNEL        (1<<8)
+#define RMNET_IOCTL_FEAT_GET_HWSW_MAP                    (1<<9)
+
+/* Input values for the RMNET_IOCTL_SET_EGRESS_DATA_FORMAT IOCTL  */
+#define RMNET_IOCTL_EGRESS_FORMAT_MAP                  (1<<1)
+#define RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION          (1<<2)
+#define RMNET_IOCTL_EGRESS_FORMAT_MUXING               (1<<3)
+#define RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM             (1<<4)
+
+/* Input values for the RMNET_IOCTL_SET_INGRESS_DATA_FORMAT IOCTL */
+#define RMNET_IOCTL_INGRESS_FORMAT_MAP                 (1<<1)
+#define RMNET_IOCTL_INGRESS_FORMAT_DEAGGREGATION       (1<<2)
+#define RMNET_IOCTL_INGRESS_FORMAT_DEMUXING            (1<<3)
+#define RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM            (1<<4)
+#define RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA            (1<<5)
+
+/* User space may not have this defined. */
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+struct rmnet_ioctl_extended_s {
+	uint32_t   extended_ioctl;
+	union {
+		uint32_t data; /* Generic data field for most extended IOCTLs */
+
+		/* Return values for
+		 *    RMNET_IOCTL_GET_DRIVER_NAME
+		 *    RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL
+		 */
+		int8_t if_name[IFNAMSIZ];
+
+		/* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL IOCTL */
+		struct {
+			uint32_t  mux_id;
+			int8_t    vchannel_name[IFNAMSIZ];
+		} rmnet_mux_val;
+
+		/* Input values for the RMNET_IOCTL_FLOW_CONTROL IOCTL */
+		struct {
+			uint8_t   flow_mode;
+			uint8_t   mux_id;
+		} flow_control_prop;
+
+		/* Return values for RMNET_IOCTL_GET_EP_PAIR */
+		struct {
+			uint32_t   consumer_pipe_num;
+			uint32_t   producer_pipe_num;
+		} ipa_ep_pair;
+
+		struct {
+			uint32_t __data; /* Placeholder for legacy data */
+			uint32_t agg_size;
+			uint32_t agg_count;
+		} ingress_format;
+	} u;
+};
+
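+/*
+ * Illustrative user-space sketch (the exact plumbing is driver specific;
+ * hanging the payload off struct ifreq and issuing the ioctl on an
+ * AF_INET socket is an assumption here):
+ *
+ *	struct rmnet_ioctl_extended_s ext = {0};
+ *	struct ifreq ifr = {0};
+ *
+ *	ext.extended_ioctl = RMNET_IOCTL_GET_DRIVER_NAME;
+ *	strncpy(ifr.ifr_name, "rmnet_data0", IFNAMSIZ - 1);
+ *	ifr.ifr_ifru.ifru_data = (void *)&ext;
+ *	ioctl(sock_fd, RMNET_IOCTL_EXTENDED, &ifr);
+ *	// on success, ext.u.if_name holds the driver name
+ */
+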
+struct rmnet_ioctl_data_s {
+	union {
+		uint32_t	operation_mode;
+		uint32_t	tcm_handle;
+	} u;
+};
+
+#define RMNET_IOCTL_QOS_MODE_6   (1<<0)
+#define RMNET_IOCTL_QOS_MODE_8   (1<<1)
+
+/* QMI QoS header definition */
+struct QMI_QOS_HDR_S {
+	unsigned char    version;
+	unsigned char    flags;
+	uint32_t         flow_id;
+} __attribute((__packed__));
+
+/* QMI QoS 8-byte header. */
+struct qmi_qos_hdr8_s {
+	struct QMI_QOS_HDR_S   hdr;
+	uint8_t                reserved[2];
+} __attribute((__packed__));
+
+#endif /* _UAPI_MSM_RMNET_H_ */
diff --git a/include/uapi/media/Kbuild b/include/uapi/media/Kbuild
new file mode 100644
index 0000000..6a5f1b8
--- /dev/null
+++ b/include/uapi/media/Kbuild
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+
+header-y += cam_cpas.h
+header-y += cam_defs.h
+header-y += cam_fd.h
+header-y += cam_icp.h
+header-y += cam_isp.h
+header-y += cam_isp_vfe.h
+header-y += cam_isp_ife.h
+header-y += cam_jpeg.h
+header-y += cam_req_mgr.h
+header-y += cam_sensor.h
+header-y += cam_sync.h
+header-y += cam_lrme.h
diff --git a/include/uapi/media/cam_cpas.h b/include/uapi/media/cam_cpas.h
new file mode 100644
index 0000000..80f1e76
--- /dev/null
+++ b/include/uapi/media/cam_cpas.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_CPAS_H__
+#define __UAPI_CAM_CPAS_H__
+
+#include "cam_defs.h"
+
+#define CAM_FAMILY_CAMERA_SS     1
+#define CAM_FAMILY_CPAS_SS       2
+
+/**
+ * struct cam_cpas_query_cap - CPAS query device capability payload
+ *
+ * @camera_family     : Camera family type
+ * @reserved          : Reserved field for alignment
+ * @camera_version    : Camera platform version
+ * @cpas_version      : Camera CPAS version within camera platform
+ *
+ */
+struct cam_cpas_query_cap {
+	uint32_t                 camera_family;
+	uint32_t                 reserved;
+	struct cam_hw_version    camera_version;
+	struct cam_hw_version    cpas_version;
+};
+
+#endif /* __UAPI_CAM_CPAS_H__ */
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
new file mode 100644
index 0000000..2dadee1
--- /dev/null
+++ b/include/uapi/media/cam_defs.h
@@ -0,0 +1,634 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_DEFS_H__
+#define __UAPI_CAM_DEFS_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+
+/* camera op codes */
+#define CAM_COMMON_OPCODE_BASE                  0x100
+#define CAM_QUERY_CAP                           (CAM_COMMON_OPCODE_BASE + 0x1)
+#define CAM_ACQUIRE_DEV                         (CAM_COMMON_OPCODE_BASE + 0x2)
+#define CAM_START_DEV                           (CAM_COMMON_OPCODE_BASE + 0x3)
+#define CAM_STOP_DEV                            (CAM_COMMON_OPCODE_BASE + 0x4)
+#define CAM_CONFIG_DEV                          (CAM_COMMON_OPCODE_BASE + 0x5)
+#define CAM_RELEASE_DEV                         (CAM_COMMON_OPCODE_BASE + 0x6)
+#define CAM_SD_SHUTDOWN                         (CAM_COMMON_OPCODE_BASE + 0x7)
+#define CAM_FLUSH_REQ                           (CAM_COMMON_OPCODE_BASE + 0x8)
+#define CAM_COMMON_OPCODE_MAX                   (CAM_COMMON_OPCODE_BASE + 0x9)
+
+#define CAM_COMMON_OPCODE_BASE_v2           0x150
+#define CAM_ACQUIRE_HW                      (CAM_COMMON_OPCODE_BASE_v2 + 0x1)
+#define CAM_RELEASE_HW                      (CAM_COMMON_OPCODE_BASE_v2 + 0x2)
+
+#define CAM_EXT_OPCODE_BASE                     0x200
+#define CAM_CONFIG_DEV_EXTERNAL                 (CAM_EXT_OPCODE_BASE + 0x1)
+
+/* camera handle type */
+#define CAM_HANDLE_USER_POINTER                 1
+#define CAM_HANDLE_MEM_HANDLE                   2
+
+/* Generic Blob CmdBuffer header properties */
+#define CAM_GENERIC_BLOB_CMDBUFFER_SIZE_MASK    0xFFFFFF00
+#define CAM_GENERIC_BLOB_CMDBUFFER_SIZE_SHIFT   8
+#define CAM_GENERIC_BLOB_CMDBUFFER_TYPE_MASK    0xFF
+#define CAM_GENERIC_BLOB_CMDBUFFER_TYPE_SHIFT   0
+
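+/*
+ * The masks above pack a blob header word as [31:8] payload size and
+ * [7:0] blob type; for example (plain C, follows directly from the
+ * definitions):
+ *
+ *	uint32_t hdr = (blob_size << CAM_GENERIC_BLOB_CMDBUFFER_SIZE_SHIFT) |
+ *		       (blob_type << CAM_GENERIC_BLOB_CMDBUFFER_TYPE_SHIFT);
+ *
+ *	uint32_t size = (hdr & CAM_GENERIC_BLOB_CMDBUFFER_SIZE_MASK) >>
+ *			CAM_GENERIC_BLOB_CMDBUFFER_SIZE_SHIFT;
+ *	uint32_t type = hdr & CAM_GENERIC_BLOB_CMDBUFFER_TYPE_MASK;
+ */
+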
+/* Command Buffer Types */
+#define CAM_CMD_BUF_DMI                     0x1
+#define CAM_CMD_BUF_DMI16                   0x2
+#define CAM_CMD_BUF_DMI32                   0x3
+#define CAM_CMD_BUF_DMI64                   0x4
+#define CAM_CMD_BUF_DIRECT                  0x5
+#define CAM_CMD_BUF_INDIRECT                0x6
+#define CAM_CMD_BUF_I2C                     0x7
+#define CAM_CMD_BUF_FW                      0x8
+#define CAM_CMD_BUF_GENERIC                 0x9
+#define CAM_CMD_BUF_LEGACY                  0xA
+
+/* UBWC API Version */
+#define CAM_UBWC_CFG_VERSION_1              1
+
+/**
+ * enum flush_type_t - Identifies the various flush types
+ *
+ * @CAM_FLUSH_TYPE_REQ:    Flush specific request
+ * @CAM_FLUSH_TYPE_ALL:    Flush all requests belonging to a context
+ * @CAM_FLUSH_TYPE_MAX:    Max enum to validate flush type
+ *
+ */
+enum flush_type_t {
+	CAM_FLUSH_TYPE_REQ,
+	CAM_FLUSH_TYPE_ALL,
+	CAM_FLUSH_TYPE_MAX
+};
+
+/**
+ * struct cam_control - Structure used by ioctl control for camera
+ *
+ * @op_code:            This is the op code for camera control
+ * @size:               Control command size
+ * @handle_type:        User pointer or shared memory handle
+ * @reserved:           Reserved field for 64 bit alignment
+ * @handle:             Control command payload
+ */
+struct cam_control {
+	uint32_t        op_code;
+	uint32_t        size;
+	uint32_t        handle_type;
+	uint32_t        reserved;
+	uint64_t        handle;
+};
+
+/* camera IOCTL */
+#define VIDIOC_CAM_CONTROL \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_control)
+
+/**
+ * struct cam_hw_version - Structure for HW version of camera devices
+ *
+ * @major    : Hardware version major
+ * @minor    : Hardware version minor
+ * @incr     : Hardware version increment
+ * @reserved : Reserved for 64 bit alignment
+ */
+struct cam_hw_version {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t incr;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_iommu_handle - Structure for IOMMU handles of camera hw devices
+ *
+ * @non_secure: Device Non Secure IOMMU handle
+ * @secure:     Device Secure IOMMU handle
+ *
+ */
+struct cam_iommu_handle {
+	int32_t non_secure;
+	int32_t secure;
+};
+
+/* camera secure mode */
+#define CAM_SECURE_MODE_NON_SECURE             0
+#define CAM_SECURE_MODE_SECURE                 1
+
+/* Camera Format Type */
+#define CAM_FORMAT_BASE                         0
+#define CAM_FORMAT_MIPI_RAW_6                   1
+#define CAM_FORMAT_MIPI_RAW_8                   2
+#define CAM_FORMAT_MIPI_RAW_10                  3
+#define CAM_FORMAT_MIPI_RAW_12                  4
+#define CAM_FORMAT_MIPI_RAW_14                  5
+#define CAM_FORMAT_MIPI_RAW_16                  6
+#define CAM_FORMAT_MIPI_RAW_20                  7
+#define CAM_FORMAT_QTI_RAW_8                    8
+#define CAM_FORMAT_QTI_RAW_10                   9
+#define CAM_FORMAT_QTI_RAW_12                   10
+#define CAM_FORMAT_QTI_RAW_14                   11
+#define CAM_FORMAT_PLAIN8                       12
+#define CAM_FORMAT_PLAIN16_8                    13
+#define CAM_FORMAT_PLAIN16_10                   14
+#define CAM_FORMAT_PLAIN16_12                   15
+#define CAM_FORMAT_PLAIN16_14                   16
+#define CAM_FORMAT_PLAIN16_16                   17
+#define CAM_FORMAT_PLAIN32_20                   18
+#define CAM_FORMAT_PLAIN64                      19
+#define CAM_FORMAT_PLAIN128                     20
+#define CAM_FORMAT_ARGB                         21
+#define CAM_FORMAT_ARGB_10                      22
+#define CAM_FORMAT_ARGB_12                      23
+#define CAM_FORMAT_ARGB_14                      24
+#define CAM_FORMAT_DPCM_10_6_10                 25
+#define CAM_FORMAT_DPCM_10_8_10                 26
+#define CAM_FORMAT_DPCM_12_6_12                 27
+#define CAM_FORMAT_DPCM_12_8_12                 28
+#define CAM_FORMAT_DPCM_14_8_14                 29
+#define CAM_FORMAT_DPCM_14_10_14                30
+#define CAM_FORMAT_NV21                         31
+#define CAM_FORMAT_NV12                         32
+#define CAM_FORMAT_TP10                         33
+#define CAM_FORMAT_YUV422                       34
+#define CAM_FORMAT_PD8                          35
+#define CAM_FORMAT_PD10                         36
+#define CAM_FORMAT_UBWC_NV12                    37
+#define CAM_FORMAT_UBWC_NV12_4R                 38
+#define CAM_FORMAT_UBWC_TP10                    39
+#define CAM_FORMAT_UBWC_P010                    40
+#define CAM_FORMAT_PLAIN8_SWAP                  41
+#define CAM_FORMAT_PLAIN8_10                    42
+#define CAM_FORMAT_PLAIN8_10_SWAP               43
+#define CAM_FORMAT_YV12                         44
+#define CAM_FORMAT_Y_ONLY                       45
+#define CAM_FORMAT_MAX                          46
+
+/* camera rotation */
+#define CAM_ROTATE_CW_0_DEGREE                  0
+#define CAM_ROTATE_CW_90_DEGREE                 1
+#define CAM_RORATE_CW_180_DEGREE                2
+#define CAM_ROTATE_CW_270_DEGREE                3
+
+/* camera Color Space */
+#define CAM_COLOR_SPACE_BASE                    0
+#define CAM_COLOR_SPACE_BT601_FULL              1
+#define CAM_COLOR_SPACE_BT601625                2
+#define CAM_COLOR_SPACE_BT601525                3
+#define CAM_COLOR_SPACE_BT709                   4
+#define CAM_COLOR_SPACE_DEPTH                   5
+#define CAM_COLOR_SPACE_MAX                     6
+
+/* camera buffer direction */
+#define CAM_BUF_INPUT                           1
+#define CAM_BUF_OUTPUT                          2
+#define CAM_BUF_IN_OUT                          3
+
+/* camera packet device Type */
+#define CAM_PACKET_DEV_BASE                     0
+#define CAM_PACKET_DEV_IMG_SENSOR               1
+#define CAM_PACKET_DEV_ACTUATOR                 2
+#define CAM_PACKET_DEV_COMPANION                3
+#define CAM_PACKET_DEV_EEPOM                    4
+#define CAM_PACKET_DEV_CSIPHY                   5
+#define CAM_PACKET_DEV_OIS                      6
+#define CAM_PACKET_DEV_FLASH                    7
+#define CAM_PACKET_DEV_FD                       8
+#define CAM_PACKET_DEV_JPEG_ENC                 9
+#define CAM_PACKET_DEV_JPEG_DEC                 10
+#define CAM_PACKET_DEV_VFE                      11
+#define CAM_PACKET_DEV_CPP                      12
+#define CAM_PACKET_DEV_CSID                     13
+#define CAM_PACKET_DEV_ISPIF                    14
+#define CAM_PACKET_DEV_IFE                      15
+#define CAM_PACKET_DEV_ICP                      16
+#define CAM_PACKET_DEV_LRME                     17
+#define CAM_PACKET_DEV_MAX                      18
+
+
+/* constants */
+#define CAM_PACKET_MAX_PLANES                   3
+
+/**
+ * struct cam_plane_cfg - Plane configuration info
+ *
+ * @width:                      Plane width in pixels
+ * @height:                     Plane height in lines
+ * @plane_stride:               Plane stride in pixel
+ * @slice_height:               Slice height in line (not used by ISP)
+ * @meta_stride:                UBWC metadata stride
+ * @meta_size:                  UBWC metadata plane size
+ * @meta_offset:                UBWC metadata offset
+ * @packer_config:              UBWC packer config
+ * @mode_config:                UBWC mode config
+ * @tile_config:                UBWC tile config
+ * @h_init:                     UBWC horizontal initial coordinate in pixels
+ * @v_init:                     UBWC vertical initial coordinate in lines
+ *
+ */
+struct cam_plane_cfg {
+	uint32_t                width;
+	uint32_t                height;
+	uint32_t                plane_stride;
+	uint32_t                slice_height;
+	uint32_t                meta_stride;
+	uint32_t                meta_size;
+	uint32_t                meta_offset;
+	uint32_t                packer_config;
+	uint32_t                mode_config;
+	uint32_t                tile_config;
+	uint32_t                h_init;
+	uint32_t                v_init;
+};
+
+/**
+ * struct cam_ubwc_plane_cfg_v1 - UBWC Plane configuration info
+ *
+ * @port_type:                  Port Type
+ * @meta_stride:                UBWC metadata stride
+ * @meta_size:                  UBWC metadata plane size
+ * @meta_offset:                UBWC metadata offset
+ * @packer_config:              UBWC packer config
+ * @mode_config_0:              UBWC mode config 0
+ * @mode_config_1:              UBWC mode config 1
+ * @tile_config:                UBWC tile config
+ * @h_init:                     UBWC horizontal initial coordinate in pixels
+ * @v_init:                     UBWC vertical initial coordinate in lines
+ *
+ */
+struct cam_ubwc_plane_cfg_v1 {
+	uint32_t                port_type;
+	uint32_t                meta_stride;
+	uint32_t                meta_size;
+	uint32_t                meta_offset;
+	uint32_t                packer_config;
+	uint32_t                mode_config_0;
+	uint32_t                mode_config_1;
+	uint32_t                tile_config;
+	uint32_t                h_init;
+	uint32_t                v_init;
+};
+
+/**
+ * struct cam_cmd_buf_desc - Command buffer descriptor
+ *
+ * @mem_handle:                 Command buffer handle
+ * @offset:                     Command start offset
+ * @size:                       Size of the command buffer in bytes
+ * @length:                     Used memory in command buffer in bytes
+ * @type:                       Type of the command buffer
+ * @meta_data:                  Data type for private command buffer
+ *                              between UMD and KMD
+ *
+ */
+struct cam_cmd_buf_desc {
+	int32_t                 mem_handle;
+	uint32_t                offset;
+	uint32_t                size;
+	uint32_t                length;
+	uint32_t                type;
+	uint32_t                meta_data;
+};
+
+/**
+ * struct cam_buf_io_cfg - Buffer io configuration for buffers
+ *
+ * @mem_handle:                 Mem_handle array for the buffers.
+ * @offsets:                    Offsets for each planes in the buffer
+ * @planes:                     Per plane information
+ * @width:                      Main plane width in pixel
+ * @height:                     Main plane height in lines
+ * @format:                     Format of the buffer
+ * @color_space:                Color space for the buffer
+ * @color_pattern:              Color pattern in the buffer
+ * @bpp:                        Bit per pixel
+ * @rotation:                   Rotation information for the buffer
+ * @resource_type:              Resource type associated with the buffer
+ * @fence:                      Fence handle
+ * @early_fence:                Fence handle for early signal
+ * @aux_cmd_buf:                An auxiliary command buffer that may be
+ *                              used for programming the IO
+ * @direction:                  Buffer direction: input or output
+ * @batch_size:                 Batch size in HFR mode
+ * @subsample_pattern:          Subsample pattern. Used in HFR mode. It
+ *                              should be consistent with batchSize and
+ *                              CAMIF programming.
+ * @subsample_period:           Subsample period. Used in HFR mode. It
+ *                              should be consistent with batchSize and
+ *                              CAMIF programming.
+ * @framedrop_pattern:          Framedrop pattern
+ * @framedrop_period:           Framedrop period
+ * @flag:                       Flags for extra information
+ * @padding:                    Padding for the structure
+ *
+ */
+struct cam_buf_io_cfg {
+	int32_t                         mem_handle[CAM_PACKET_MAX_PLANES];
+	uint32_t                        offsets[CAM_PACKET_MAX_PLANES];
+	struct cam_plane_cfg            planes[CAM_PACKET_MAX_PLANES];
+	uint32_t                        format;
+	uint32_t                        color_space;
+	uint32_t                        color_pattern;
+	uint32_t                        bpp;
+	uint32_t                        rotation;
+	uint32_t                        resource_type;
+	int32_t                         fence;
+	int32_t                         early_fence;
+	struct cam_cmd_buf_desc         aux_cmd_buf;
+	uint32_t                        direction;
+	uint32_t                        batch_size;
+	uint32_t                        subsample_pattern;
+	uint32_t                        subsample_period;
+	uint32_t                        framedrop_pattern;
+	uint32_t                        framedrop_period;
+	uint32_t                        flag;
+	uint32_t                        padding;
+};
+
+/**
+ * struct cam_packet_header - Camera packet header
+ *
+ * @op_code:                    Camera packet opcode
+ * @size:                       Size of the camera packet in bytes
+ * @request_id:                 Request id for this camera packet
+ * @flags:                      Flags for the camera packet
+ * @padding:                    Padding
+ *
+ */
+struct cam_packet_header {
+	uint32_t                op_code;
+	uint32_t                size;
+	uint64_t                request_id;
+	uint32_t                flags;
+	uint32_t                padding;
+};
+
+/**
+ * struct cam_patch_desc - Patch structure
+ *
+ * @dst_buf_hdl:                Memory handle for the dest buffer
+ * @dst_offset:                 Offset byte in the dest buffer
+ * @src_buf_hdl:                Memory handle for the source buffer
+ * @src_offset:                 Offset byte in the source buffer
+ *
+ */
+struct cam_patch_desc {
+	int32_t                 dst_buf_hdl;
+	uint32_t                dst_offset;
+	int32_t                 src_buf_hdl;
+	uint32_t                src_offset;
+};
+
+/**
+ * struct cam_packet - Camera packet structure
+ *
+ * @header:                     Camera packet header
+ * @cmd_buf_offset:             Command buffer start offset
+ * @num_cmd_buf:                Number of the command buffer in the packet
+ * @io_config_offset:           Buffer io configuration start offset
+ * @num_io_configs:             Number of the buffer io configurations
+ * @patch_offset:               Patch offset for the patch structure
+ * @num_patches:                Number of the patch structure
+ * @kmd_cmd_buf_index:          Command buffer index which contains extra
+ *                              space for the KMD buffer
+ * @kmd_cmd_buf_offset:         Offset from the beginning of the command
+ *                              buffer for KMD usage.
+ * @payload:                    Camera packet payload
+ *
+ */
+struct cam_packet {
+	struct cam_packet_header        header;
+	uint32_t                        cmd_buf_offset;
+	uint32_t                        num_cmd_buf;
+	uint32_t                        io_configs_offset;
+	uint32_t                        num_io_configs;
+	uint32_t                        patch_offset;
+	uint32_t                        num_patches;
+	uint32_t                        kmd_cmd_buf_index;
+	uint32_t                        kmd_cmd_buf_offset;
+	uint64_t                        payload[1];
+
+};
+
+/**
+ * struct cam_release_dev_cmd - Control payload for release devices
+ *
+ * @session_handle:             Session handle for the release
+ * @dev_handle:                 Device handle for the release
+ */
+struct cam_release_dev_cmd {
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+};
+
+/**
+ * struct cam_start_stop_dev_cmd - Control payload for start/stop device
+ *
+ * @session_handle:             Session handle for the start/stop command
+ * @dev_handle:                 Device handle for the start/stop command
+ *
+ */
+struct cam_start_stop_dev_cmd {
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+};
+
+/**
+ * struct cam_config_dev_cmd - Command payload for configure device
+ *
+ * @session_handle:             Session handle for the command
+ * @dev_handle:                 Device handle for the command
+ * @offset:                     Offset byte in the packet handle.
+ * @packet_handle:              Packet memory handle for the actual packet:
+ *                              struct cam_packet.
+ *
+ */
+struct cam_config_dev_cmd {
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+	uint64_t                offset;
+	uint64_t                packet_handle;
+};
+
+/**
+ * struct cam_query_cap_cmd - Payload for query device capability
+ *
+ * @size:               Handle size
+ * @handle_type:        User pointer or shared memory handle
+ * @caps_handle:        Device specific query command payload
+ *
+ */
+struct cam_query_cap_cmd {
+	uint32_t        size;
+	uint32_t        handle_type;
+	uint64_t        caps_handle;
+};
+
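+/*
+ * Illustrative query-cap flow (sketch; assumes an open camera subdev fd
+ * and a CPAS-style caps struct from cam_cpas.h, error handling omitted):
+ *
+ *	struct cam_cpas_query_cap caps = {0};
+ *	struct cam_query_cap_cmd qcap = {
+ *		.size = sizeof(caps),
+ *		.handle_type = CAM_HANDLE_USER_POINTER,
+ *		.caps_handle = (uint64_t)(uintptr_t)&caps,
+ *	};
+ *	struct cam_control ctrl = {
+ *		.op_code = CAM_QUERY_CAP,
+ *		.size = sizeof(qcap),
+ *		.handle_type = CAM_HANDLE_USER_POINTER,
+ *		.handle = (uint64_t)(uintptr_t)&qcap,
+ *	};
+ *
+ *	ioctl(dev_fd, VIDIOC_CAM_CONTROL, &ctrl);
+ */
+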
+/**
+ * struct cam_acquire_dev_cmd - Control payload for acquire devices
+ *
+ * @session_handle:     Session handle for the acquire command
+ * @dev_handle:         Device handle to be returned
+ * @handle_type:        Resource handle type:
+ *                      1 = user pointer, 2 = mem handle
+ * @num_resources:      Number of the resources to be acquired
+ * @resource_hdl:       Resource handle that refers to the actual
+ *                      resource array. Each item in this
+ *                      array is device specific resource structure
+ *
+ */
+struct cam_acquire_dev_cmd {
+	int32_t         session_handle;
+	int32_t         dev_handle;
+	uint32_t        handle_type;
+	uint32_t        num_resources;
+	uint64_t        resource_hdl;
+};
+
+/*
+ * In the old scheme, num_resources in struct cam_acquire_dev_cmd holds a
+ * valid value while acquiring a device; during ACQUIRE_DEV the KMD driver
+ * returns dev_handle and also associates HW with that handle.
+ * If num_resources is set to the constant below, the new scheme is in use
+ * and HW is not acquired in the ACQUIRE_DEV IOCTL: ACQUIRE_DEV only
+ * returns the handle, and the subsequent ACQUIRE_HW IOCTL is what
+ * associates the HW with the dev_handle.
+ *
+ * (Data type): uint32_t
+ */
+#define CAM_API_COMPAT_CONSTANT                   0xFEFEFEFE
+
+#define CAM_ACQUIRE_HW_STRUCT_VERSION_1           1
+
+/**
+ * struct cam_acquire_hw_cmd_v1 - Control payload for acquire HW IOCTL (Ver 1)
+ *
+ * @struct_version:     = CAM_ACQUIRE_HW_STRUCT_VERSION_1 for this struct
+ *                      This value must be the first 32 bits of any structure
+ *                      related to this IOCTL, so that if the struct needs to
+ *                      change, the leading 32 bits can be read to get the
+ *                      version number and the data can then be cast to the
+ *                      matching struct.
+ * @reserved:           Reserved field for 64-bit alignment
+ * @session_handle:     Session handle for the acquire command
+ * @dev_handle:         Device handle to be returned
+ * @handle_type:        How to interpret resource_hdl:
+ *                      1 = user pointer, 2 = mem handle
+ * @data_size:          Total size of data contained in memory pointed
+ *                      to by resource_hdl
+ * @resource_hdl:       Resource handle that refers to the actual
+ *                      resource data.
+ */
+struct cam_acquire_hw_cmd_v1 {
+	uint32_t        struct_version;
+	uint32_t        reserved;
+	int32_t         session_handle;
+	int32_t         dev_handle;
+	uint32_t        handle_type;
+	uint32_t        data_size;
+	uint64_t        resource_hdl;
+};
+
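+/*
+ * Illustrative version dispatch (sketch of the scheme described above;
+ * "cmd" is an assumed struct cam_control pointer and cam_acquire_hw_v1()
+ * an assumed handler): the KMD reads the leading 32 bits first, then
+ * interprets the rest of the payload according to that version.
+ *
+ *	uint32_t version;
+ *
+ *	if (copy_from_user(&version, u64_to_user_ptr(cmd->handle),
+ *			   sizeof(version)))
+ *		return -EFAULT;
+ *	if (version == CAM_ACQUIRE_HW_STRUCT_VERSION_1)
+ *		return cam_acquire_hw_v1(ctx, cmd);
+ *	return -EINVAL;
+ */
+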
+#define CAM_RELEASE_HW_STRUCT_VERSION_1           1
+
+/**
+ * struct cam_release_hw_cmd_v1 - Control payload for release HW IOCTL (Ver 1)
+ *
+ * @struct_version:     = CAM_RELEASE_HW_STRUCT_VERSION_1 for this struct
+ *                      This value must be the first 32 bits of any structure
+ *                      related to this IOCTL, so that if the struct needs to
+ *                      change, the leading 32 bits can be read to get the
+ *                      version number and the data can then be cast to the
+ *                      matching struct.
+ * @reserved:           Reserved field for 64-bit alignment
+ * @session_handle:     Session handle for the release
+ * @dev_handle:         Device handle for the release
+ */
+struct cam_release_hw_cmd_v1 {
+	uint32_t                struct_version;
+	uint32_t                reserved;
+	int32_t                 session_handle;
+	int32_t                 dev_handle;
+};
+
+/**
+ * struct cam_flush_dev_cmd - Control payload for flush devices
+ *
+ * @version:           Version
+ * @session_handle:    Session handle for the acquire command
+ * @dev_handle:        Device handle to be returned
+ * @flush_type:        Flush type:
+ *                     0 = flush specific request
+ *                     1 = flush all
+ * @reserved:          Reserved for 64 bit alignment
+ * @req_id:            Request id to be cancelled
+ *
+ */
+struct cam_flush_dev_cmd {
+	uint64_t       version;
+	int32_t        session_handle;
+	int32_t        dev_handle;
+	uint32_t       flush_type;
+	uint32_t       reserved;
+	int64_t        req_id;
+};
+
+/**
+ * struct cam_ubwc_config - UBWC Configuration Payload
+ *
+ * @api_version:         UBWC config api version
+ * @num_ports:           Number of ports to be configured
+ * @ubwc_plane_cfg:      Array of UBWC configurations per port
+ *                       Size [CAM_PACKET_MAX_PLANES - 1] per port
+ *                       as UBWC is supported on Y & C planes
+ *                       and therefore a max size of 2 planes
+ *
+ */
+struct cam_ubwc_config {
+	uint32_t   api_version;
+	uint32_t   num_ports;
+	struct cam_ubwc_plane_cfg_v1
+		   ubwc_plane_cfg[1][CAM_PACKET_MAX_PLANES - 1];
+};
+
+/**
+ * struct cam_cmd_mem_region_info -
+ *              Cmd buffer region info
+ *
+ * @mem_handle : Memory handle of the region
+ * @offset     : Offset if any
+ * @size       : Size of the region
+ * @flags      : Flags if any
+ */
+struct cam_cmd_mem_region_info {
+	int32_t   mem_handle;
+	uint32_t  offset;
+	uint32_t  size;
+	uint32_t  flags;
+};
+
+/**
+ * struct cam_cmd_mem_regions -
+ *        List of multiple memory descriptors
+ *        of different regions
+ *
+ * @version        : Version number
+ * @num_regions    : Number of regions
+ * @map_info_array : Array of all the regions
+ */
+struct cam_cmd_mem_regions {
+	uint32_t version;
+	uint32_t num_regions;
+	struct cam_cmd_mem_region_info map_info_array[1];
+};
+
+
+#endif /* __UAPI_CAM_DEFS_H__ */
diff --git a/include/uapi/media/cam_fd.h b/include/uapi/media/cam_fd.h
new file mode 100644
index 0000000..885015d
--- /dev/null
+++ b/include/uapi/media/cam_fd.h
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_FD_H__
+#define __UAPI_CAM_FD_H__
+
+#include "cam_defs.h"
+
+#define CAM_FD_MAX_FACES                       35
+#define CAM_FD_RAW_RESULT_ENTRIES              512
+
+/* FD Op Codes */
+#define CAM_PACKET_OPCODES_FD_FRAME_UPDATE     0x0
+
+/* FD Command Buffer identifiers */
+#define CAM_FD_CMD_BUFFER_ID_GENERIC           0x0
+#define CAM_FD_CMD_BUFFER_ID_CDM               0x1
+#define CAM_FD_CMD_BUFFER_ID_MAX               0x2
+
+/* FD Blob types */
+#define CAM_FD_BLOB_TYPE_SOC_CLOCK_BW_REQUEST  0x0
+#define CAM_FD_BLOB_TYPE_RAW_RESULTS_REQUIRED  0x1
+
+/* FD Resource IDs */
+#define CAM_FD_INPUT_PORT_ID_IMAGE             0x0
+#define CAM_FD_INPUT_PORT_ID_MAX               0x1
+
+#define CAM_FD_OUTPUT_PORT_ID_RESULTS          0x0
+#define CAM_FD_OUTPUT_PORT_ID_RAW_RESULTS      0x1
+#define CAM_FD_OUTPUT_PORT_ID_WORK_BUFFER      0x2
+#define CAM_FD_OUTPUT_PORT_ID_MAX              0x3
+
+/**
+ * struct cam_fd_soc_clock_bw_request - SOC clock, bandwidth request info
+ *
+ * @clock_rate : Clock rate required while processing frame
+ * @bandwidth  : Bandwidth required while processing frame
+ * @reserved   : Reserved for future use
+ */
+struct cam_fd_soc_clock_bw_request {
+	uint64_t    clock_rate;
+	uint64_t    bandwidth;
+	uint64_t    reserved[4];
+};
+
+/**
+ * struct cam_fd_face - Face properties
+ *
+ * @prop1 : Property 1 of face
+ * @prop2 : Property 2 of face
+ * @prop3 : Property 3 of face
+ * @prop4 : Property 4 of face
+ *
+ * Do not change this layout, this is inline with how HW writes
+ * these values directly when the buffer is programmed to HW
+ */
+struct cam_fd_face {
+	uint32_t    prop1;
+	uint32_t    prop2;
+	uint32_t    prop3;
+	uint32_t    prop4;
+};
+
+/**
+ * struct cam_fd_results - FD results layout
+ *
+ * @faces      : Array of faces with face properties
+ * @face_count : Number of faces detected
+ * @reserved   : Reserved for alignment
+ *
+ * Do not change this layout, this is inline with how HW writes
+ * these values directly when the buffer is programmed to HW
+ */
+struct cam_fd_results {
+	struct cam_fd_face    faces[CAM_FD_MAX_FACES];
+	uint32_t              face_count;
+	uint32_t              reserved[3];
+};
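+
+/*
+ * Read-back sketch (editor's illustration): once HW has filled the
+ * results buffer, userspace can walk the fixed layout above. "buf" is a
+ * hypothetical mapped pointer and consume_face() a hypothetical helper.
+ *
+ *	struct cam_fd_results *res = (struct cam_fd_results *)buf;
+ *	uint32_t n = res->face_count;
+ *
+ *	if (n > CAM_FD_MAX_FACES)	// defensive clamp
+ *		n = CAM_FD_MAX_FACES;
+ *	for (uint32_t i = 0; i < n; i++)
+ *		consume_face(&res->faces[i]);
+ */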
+
+/**
+ * struct cam_fd_hw_caps - FD HW capabilities
+ *
+ * @core_version          : FD core version
+ * @wrapper_version       : FD wrapper version
+ * @raw_results_available : Whether raw results are available on this HW
+ * @supported_modes       : Modes supported by this HW.
+ * @reserved              : Reserved for future use
+ */
+struct cam_fd_hw_caps {
+	struct cam_hw_version    core_version;
+	struct cam_hw_version    wrapper_version;
+	uint32_t                 raw_results_available;
+	uint32_t                 supported_modes;
+	uint64_t                 reserved;
+};
+
+/**
+ * struct cam_fd_query_cap_cmd - FD Query capabilities information
+ *
+ * @device_iommu : FD IOMMU handles
+ * @cdm_iommu    : CDM iommu handles
+ * @hw_caps      : FD HW capabilities
+ * @reserved     : Reserved for alignment
+ */
+struct cam_fd_query_cap_cmd {
+	struct cam_iommu_handle    device_iommu;
+	struct cam_iommu_handle    cdm_iommu;
+	struct cam_fd_hw_caps      hw_caps;
+	uint64_t                   reserved;
+};
+
+/**
+ * struct cam_fd_acquire_dev_info - FD acquire device information
+ *
+ * @clk_bw_request  : SOC clock, bandwidth request
+ * @priority        : Priority for this acquire
+ * @mode            : Mode in which to run FD HW.
+ * @get_raw_results : Whether this acquire needs face raw results
+ *                    while frame processing
+ * @reserved        : Reserved field for 64 bit alignment
+ */
+struct cam_fd_acquire_dev_info {
+	struct cam_fd_soc_clock_bw_request clk_bw_request;
+	uint32_t                           priority;
+	uint32_t                           mode;
+	uint32_t                           get_raw_results;
+	uint32_t                           reserved[13];
+};
+
+#endif /* __UAPI_CAM_FD_H__ */
diff --git a/include/uapi/media/cam_icp.h b/include/uapi/media/cam_icp.h
new file mode 100644
index 0000000..cb0b139
--- /dev/null
+++ b/include/uapi/media/cam_icp.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_ICP_H__
+#define __UAPI_CAM_ICP_H__
+
+#include "cam_defs.h"
+
+/* icp, ipe, bps, cdm(ipe/bps) are used in querycap */
+#define CAM_ICP_DEV_TYPE_A5      1
+#define CAM_ICP_DEV_TYPE_IPE     2
+#define CAM_ICP_DEV_TYPE_BPS     3
+#define CAM_ICP_DEV_TYPE_IPE_CDM 4
+#define CAM_ICP_DEV_TYPE_BPS_CDM 5
+#define CAM_ICP_DEV_TYPE_MAX     5
+
+/* definitions needed for icp acquire device */
+#define CAM_ICP_RES_TYPE_BPS        1
+#define CAM_ICP_RES_TYPE_IPE_RT     2
+#define CAM_ICP_RES_TYPE_IPE        3
+#define CAM_ICP_RES_TYPE_MAX        4
+
+/* packet opcode types */
+#define CAM_ICP_OPCODE_IPE_UPDATE   0
+#define CAM_ICP_OPCODE_BPS_UPDATE   1
+#define CAM_ICP_OPCODE_IPE_SETTINGS 2
+#define CAM_ICP_OPCODE_BPS_SETTINGS 3
+
+
+/* IPE input port resource type */
+#define CAM_ICP_IPE_INPUT_IMAGE_FULL            0x0
+#define CAM_ICP_IPE_INPUT_IMAGE_DS4             0x1
+#define CAM_ICP_IPE_INPUT_IMAGE_DS16            0x2
+#define CAM_ICP_IPE_INPUT_IMAGE_DS64            0x3
+#define CAM_ICP_IPE_INPUT_IMAGE_FULL_REF        0x4
+#define CAM_ICP_IPE_INPUT_IMAGE_DS4_REF         0x5
+#define CAM_ICP_IPE_INPUT_IMAGE_DS16_REF        0x6
+#define CAM_ICP_IPE_INPUT_IMAGE_DS64_REF        0x7
+
+/* IPE output port resource type */
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DISPLAY        0x8
+#define CAM_ICP_IPE_OUTPUT_IMAGE_VIDEO          0x9
+#define CAM_ICP_IPE_OUTPUT_IMAGE_FULL_REF       0xA
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DS4_REF        0xB
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DS16_REF       0xC
+#define CAM_ICP_IPE_OUTPUT_IMAGE_DS64_REF       0xD
+
+#define CAM_ICP_IPE_IMAGE_MAX                   0xE
+
+/* BPS input port resource type */
+#define CAM_ICP_BPS_INPUT_IMAGE                 0x0
+
+/* BPS output port resource type */
+#define CAM_ICP_BPS_OUTPUT_IMAGE_FULL           0x1
+#define CAM_ICP_BPS_OUTPUT_IMAGE_DS4            0x2
+#define CAM_ICP_BPS_OUTPUT_IMAGE_DS16           0x3
+#define CAM_ICP_BPS_OUTPUT_IMAGE_DS64           0x4
+#define CAM_ICP_BPS_OUTPUT_IMAGE_STATS_BG       0x5
+#define CAM_ICP_BPS_OUTPUT_IMAGE_STATS_BHIST    0x6
+#define CAM_ICP_BPS_OUTPUT_IMAGE_REG1           0x7
+#define CAM_ICP_BPS_OUTPUT_IMAGE_REG2           0x8
+
+#define CAM_ICP_BPS_IO_IMAGES_MAX               0x9
+
+/* Command meta types */
+#define CAM_ICP_CMD_META_GENERIC_BLOB           0x1
+
+/* Generic blob types */
+#define CAM_ICP_CMD_GENERIC_BLOB_CLK            0x1
+#define CAM_ICP_CMD_GENERIC_BLOB_CFG_IO         0x2
+#define CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_MAP     0x3
+#define CAM_ICP_CMD_GENERIC_BLOB_FW_MEM_UNMAP   0x4
+
+/**
+ * struct cam_icp_clk_bw_request
+ *
+ * @budget_ns: Time required to process frame
+ * @frame_cycles: Frame cycles needed to process the frame
+ * @rt_flag: Flag to indicate real time stream
+ * @uncompressed_bw: Bandwidth required to process frame
+ * @compressed_bw: Compressed bandwidth to process frame
+ */
+struct cam_icp_clk_bw_request {
+	uint64_t budget_ns;
+	uint32_t frame_cycles;
+	uint32_t rt_flag;
+	uint64_t uncompressed_bw;
+	uint64_t compressed_bw;
+};
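+
+/*
+ * Fill sketch (editor's illustration, values hypothetical) for a
+ * real-time stream voting a ~30 fps frame budget:
+ *
+ *	struct cam_icp_clk_bw_request clk = {
+ *		.budget_ns       = 33333333,		// one frame at ~30 fps
+ *		.frame_cycles    = frame_cycles,	// caller's estimate
+ *		.rt_flag         = 1,
+ *		.uncompressed_bw = uncompressed_bw,
+ *		.compressed_bw   = compressed_bw,
+ *	};
+ */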
+
+/**
+ * struct cam_icp_dev_ver - Device information for a particular hw type
+ *
+ * This is used to get device version info of
+ * ICP, IPE, BPS and the CDM associated with IPE and BPS from firmware
+ * and use this info in CAM_QUERY_CAP IOCTL
+ *
+ * @dev_type: hardware type for the cap info(icp, ipe, bps, cdm(ipe/bps))
+ * @reserved: reserved field
+ * @hw_ver: major, minor and incr values of a device version
+ */
+struct cam_icp_dev_ver {
+	uint32_t dev_type;
+	uint32_t reserved;
+	struct cam_hw_version hw_ver;
+};
+
+/**
+ * struct cam_icp_ver - ICP version info
+ *
+ * This structure carries the fw and api versions.
+ * It is used to get the firmware version and api version from firmware
+ * and use this info in CAM_QUERY_CAP IOCTL
+ *
+ * @major: FW version major
+ * @minor: FW version minor
+ * @revision: FW version increment
+ */
+struct cam_icp_ver {
+	uint32_t major;
+	uint32_t minor;
+	uint32_t revision;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_icp_query_cap_cmd - ICP query device capability payload
+ *
+ * @dev_iommu_handle: icp iommu handles for secure/non secure modes
+ * @cdm_iommu_handle: iommu handles for secure/non secure modes
+ * @fw_version: firmware version info
+ * @api_version: api version info
+ * @num_ipe: number of ipes
+ * @num_bps: number of bps
+ * @dev_ver: returned device capability array
+ */
+struct cam_icp_query_cap_cmd {
+	struct cam_iommu_handle dev_iommu_handle;
+	struct cam_iommu_handle cdm_iommu_handle;
+	struct cam_icp_ver fw_version;
+	struct cam_icp_ver api_version;
+	uint32_t num_ipe;
+	uint32_t num_bps;
+	struct cam_icp_dev_ver dev_ver[CAM_ICP_DEV_TYPE_MAX];
+};
+
+/**
+ * struct cam_icp_res_info - ICP output resource info
+ *
+ * @format: format of the resource
+ * @width:  width in pixels
+ * @height: height in lines
+ * @fps:  fps
+ */
+struct cam_icp_res_info {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	uint32_t fps;
+};
+
+/**
+ * struct cam_icp_acquire_dev_info - An ICP device info
+ *
+ * @scratch_mem_size: Output param - size of scratch memory
+ * @dev_type: device type (IPE_RT/IPE_NON_RT/BPS)
+ * @io_config_cmd_size: size of IO config command
+ * @io_config_cmd_handle: IO config command for each acquire
+ * @secure_mode: camera mode (secure/non secure)
+ * @chain_info: chaining info of FW device handles
+ * @in_res: resource info used for clock and bandwidth calculation
+ * @num_out_res: number of output resources
+ * @out_res: output resource
+ */
+struct cam_icp_acquire_dev_info {
+	uint32_t scratch_mem_size;
+	uint32_t dev_type;
+	uint32_t io_config_cmd_size;
+	int32_t  io_config_cmd_handle;
+	uint32_t secure_mode;
+	int32_t chain_info;
+	struct cam_icp_res_info in_res;
+	uint32_t num_out_res;
+	struct cam_icp_res_info out_res[1];
+} __attribute__((__packed__));
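+
+/*
+ * Sizing sketch (editor's illustration): out_res is declared with one
+ * entry but num_out_res entries follow, and the struct is packed, so:
+ *
+ *	size_t len = sizeof(struct cam_icp_acquire_dev_info) +
+ *		(num_out_res - 1) * sizeof(struct cam_icp_res_info);
+ */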
+
+#endif /* __UAPI_CAM_ICP_H__ */
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
new file mode 100644
index 0000000..16105557
--- /dev/null
+++ b/include/uapi/media/cam_isp.h
@@ -0,0 +1,432 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_ISP_H__
+#define __UAPI_CAM_ISP_H__
+
+#include "cam_defs.h"
+#include "cam_isp_vfe.h"
+#include "cam_isp_ife.h"
+
+
+/* ISP driver name */
+#define CAM_ISP_DEV_NAME                        "cam-isp"
+
+/* HW type */
+#define CAM_ISP_HW_BASE                         0
+#define CAM_ISP_HW_CSID                         1
+#define CAM_ISP_HW_VFE                          2
+#define CAM_ISP_HW_IFE                          3
+#define CAM_ISP_HW_ISPIF                        4
+#define CAM_ISP_HW_MAX                          5
+
+/* Color Pattern */
+#define CAM_ISP_PATTERN_BAYER_RGRGRG            0
+#define CAM_ISP_PATTERN_BAYER_GRGRGR            1
+#define CAM_ISP_PATTERN_BAYER_BGBGBG            2
+#define CAM_ISP_PATTERN_BAYER_GBGBGB            3
+#define CAM_ISP_PATTERN_YUV_YCBYCR              4
+#define CAM_ISP_PATTERN_YUV_YCRYCB              5
+#define CAM_ISP_PATTERN_YUV_CBYCRY              6
+#define CAM_ISP_PATTERN_YUV_CRYCBY              7
+#define CAM_ISP_PATTERN_MAX                     8
+
+/* Usage Type */
+#define CAM_ISP_RES_USAGE_SINGLE                0
+#define CAM_ISP_RES_USAGE_DUAL                  1
+#define CAM_ISP_RES_USAGE_MAX                   2
+
+/* Resource ID */
+#define CAM_ISP_RES_ID_PORT                     0
+#define CAM_ISP_RES_ID_CLK                      1
+#define CAM_ISP_RES_ID_MAX                      2
+
+/* Resource Type - Type of resource for the resource id
+ * defined in cam_isp_vfe.h, cam_isp_ife.h
+ */
+
+/* Lane Type in input resource for Port */
+#define CAM_ISP_LANE_TYPE_DPHY                  0
+#define CAM_ISP_LANE_TYPE_CPHY                  1
+#define CAM_ISP_LANE_TYPE_MAX                   2
+
+/* ISP Resource Composite Group ID */
+#define CAM_ISP_RES_COMP_GROUP_NONE             0
+#define CAM_ISP_RES_COMP_GROUP_ID_0             1
+#define CAM_ISP_RES_COMP_GROUP_ID_1             2
+#define CAM_ISP_RES_COMP_GROUP_ID_2             3
+#define CAM_ISP_RES_COMP_GROUP_ID_3             4
+#define CAM_ISP_RES_COMP_GROUP_ID_4             5
+#define CAM_ISP_RES_COMP_GROUP_ID_5             6
+#define CAM_ISP_RES_COMP_GROUP_ID_MAX           6
+
+/* ISP packet opcode for ISP */
+#define CAM_ISP_PACKET_OP_BASE                  0
+#define CAM_ISP_PACKET_INIT_DEV                 1
+#define CAM_ISP_PACKET_UPDATE_DEV               2
+#define CAM_ISP_PACKET_OP_MAX                   3
+
+/* ISP packet meta_data type for command buffer */
+#define CAM_ISP_PACKET_META_BASE                0
+#define CAM_ISP_PACKET_META_LEFT                1
+#define CAM_ISP_PACKET_META_RIGHT               2
+#define CAM_ISP_PACKET_META_COMMON              3
+#define CAM_ISP_PACKET_META_DMI_LEFT            4
+#define CAM_ISP_PACKET_META_DMI_RIGHT           5
+#define CAM_ISP_PACKET_META_DMI_COMMON          6
+#define CAM_ISP_PACKET_META_CLOCK               7
+#define CAM_ISP_PACKET_META_CSID                8
+#define CAM_ISP_PACKET_META_DUAL_CONFIG         9
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_LEFT   10
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_RIGHT  11
+#define CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON 12
+
+/* DSP mode */
+#define CAM_ISP_DSP_MODE_NONE                   0
+#define CAM_ISP_DSP_MODE_ONE_WAY                1
+#define CAM_ISP_DSP_MODE_ROUND                  2
+
+/* ISP Generic Cmd Buffer Blob types */
+#define CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG          0
+#define CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG        1
+#define CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG           2
+#define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG         3
+#define CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG   4
+
+/* Query devices */
+/**
+ * struct cam_isp_dev_cap_info - A cap info for particular hw type
+ *
+ * @hw_type:            Hardware type for the cap info
+ * @reserved:           reserved field for alignment
+ * @hw_version:         Hardware version
+ *
+ */
+struct cam_isp_dev_cap_info {
+	uint32_t              hw_type;
+	uint32_t              reserved;
+	struct cam_hw_version hw_version;
+};
+
+/**
+ * struct cam_isp_query_cap_cmd - ISP query device capability payload
+ *
+ * @device_iommu:               returned iommu handles for device
+ * @cdm_iommu:                  returned iommu handles for cdm
+ * @num_dev:                    returned number of device capabilities
+ * @reserved:                   reserved field for alignment
+ * @dev_caps:                   returned device capability array
+ *
+ */
+struct cam_isp_query_cap_cmd {
+	struct cam_iommu_handle       device_iommu;
+	struct cam_iommu_handle       cdm_iommu;
+	int32_t                       num_dev;
+	uint32_t                      reserved;
+	struct cam_isp_dev_cap_info   dev_caps[CAM_ISP_HW_MAX];
+};
+
+/* Acquire Device */
+/**
+ * struct cam_isp_out_port_info - An output port resource info
+ *
+ * @res_type:                   output resource type defined in file
+ *                              cam_isp_vfe.h or cam_isp_ife.h
+ * @format:                     output format of the resource
+ * @width:                      output width in pixels
+ * @height:                     output height in lines
+ * @comp_grp_id:                composite group id for the resource.
+ * @split_point:                split point in pixels for the dual VFE.
+ * @secure_mode:                flag to tell if output should be run in secure
+ *                              mode or not. See cam_defs.h for definition
+ * @reserved:                   reserved field for alignment
+ *
+ */
+struct cam_isp_out_port_info {
+	uint32_t                res_type;
+	uint32_t                format;
+	uint32_t                width;
+	uint32_t                height;
+	uint32_t                comp_grp_id;
+	uint32_t                split_point;
+	uint32_t                secure_mode;
+	uint32_t                reserved;
+};
+
+/**
+ * struct cam_isp_in_port_info - An input port resource info
+ *
+ * @res_type:                   input resource type defined in file
+ *                              cam_isp_vfe.h or cam_isp_ife.h
+ * @lane_type:                  lane type: c-phy or d-phy.
+ * @lane_num:                   active lane number
+ * @lane_cfg:                   lane configurations: 4 bits per lane
+ * @vc:                         input virtual channel number
+ * @dt:                         input data type number
+ * @format:                     input format
+ * @test_pattern:               test pattern for the testgen
+ * @usage_type:                 whether dual vfe is required
+ * @left_start:                 left input start offset in pixels
+ * @left_stop:                  left input stop offset in pixels
+ * @left_width:                 left input width in pixels
+ * @right_start:                right input start offset in pixels.
+ *                              Only for Dual VFE
+ * @right_stop:                 right input stop offset in pixels.
+ *                              Only for Dual VFE
+ * @right_width:                right input width in pixels.
+ *                              Only for dual VFE
+ * @line_start:                 starting line number of the input
+ * @line_stop:                  ending line number of the input
+ * @height:                     input height in lines
+ * @pixel_clk:                  sensor output clock
+ * @batch_size:                 batch size for HFR mode
+ * @dsp_mode:                   DSP stream mode (Defines as CAM_ISP_DSP_MODE_*)
+ * @hbi_cnt:                    HBI count for the camif input
+ * @reserved:                   Reserved field for alignment
+ * @num_out_res:                number of the output resource associated
+ * @data:                       payload that contains the output resources
+ *
+ */
+struct cam_isp_in_port_info {
+	uint32_t                        res_type;
+	uint32_t                        lane_type;
+	uint32_t                        lane_num;
+	uint32_t                        lane_cfg;
+	uint32_t                        vc;
+	uint32_t                        dt;
+	uint32_t                        format;
+	uint32_t                        test_pattern;
+	uint32_t                        usage_type;
+	uint32_t                        left_start;
+	uint32_t                        left_stop;
+	uint32_t                        left_width;
+	uint32_t                        right_start;
+	uint32_t                        right_stop;
+	uint32_t                        right_width;
+	uint32_t                        line_start;
+	uint32_t                        line_stop;
+	uint32_t                        height;
+	uint32_t                        pixel_clk;
+	uint32_t                        batch_size;
+	uint32_t                        dsp_mode;
+	uint32_t                        hbi_cnt;
+	uint32_t                        reserved;
+	uint32_t                        num_out_res;
+	struct cam_isp_out_port_info    data[1];
+};
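+
+/*
+ * Fill sketch (editor's illustration, values hypothetical) for a single
+ * d-phy in-port with one out-port in the trailing data[] payload; the
+ * buffer must be sized for num_out_res out-port entries:
+ *
+ *	struct cam_isp_in_port_info *in = ...;
+ *
+ *	in->res_type    = CAM_ISP_IFE_IN_RES_PHY_0;
+ *	in->lane_type   = CAM_ISP_LANE_TYPE_DPHY;
+ *	in->lane_num    = 4;
+ *	in->usage_type  = CAM_ISP_RES_USAGE_SINGLE;
+ *	in->num_out_res = 1;
+ *	in->data[0].res_type = CAM_ISP_IFE_OUT_RES_FULL;
+ */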
+
+/**
+ * struct cam_isp_resource - A resource bundle
+ *
+ * @resource_id:                resource id for the resource bundle
+ * @length:                     length of the whole resource blob
+ * @handle_type:                type of the resource handle
+ * @reserved:                   reserved field for alignment
+ * @res_hdl:                    resource handle that points to the
+ *                              resource array
+ *
+ */
+struct cam_isp_resource {
+	uint32_t                       resource_id;
+	uint32_t                       length;
+	uint32_t                       handle_type;
+	uint32_t                       reserved;
+	uint64_t                       res_hdl;
+};
+
+/**
+ * struct cam_isp_port_hfr_config - HFR configuration for this port
+ *
+ * @resource_type:              Resource type
+ * @subsample_pattern:          Subsample pattern. Used in HFR mode. It
+ *                              should be consistent with batchSize and
+ *                              CAMIF programming.
+ * @subsample_period:           Subsample period. Used in HFR mode. It
+ *                              should be consistent with batchSize and
+ *                              CAMIF programming.
+ * @framedrop_pattern:          Framedrop pattern
+ * @framedrop_period:           Framedrop period
+ * @reserved:                   Reserved for alignment
+ */
+struct cam_isp_port_hfr_config {
+	uint32_t                       resource_type;
+	uint32_t                       subsample_pattern;
+	uint32_t                       subsample_period;
+	uint32_t                       framedrop_pattern;
+	uint32_t                       framedrop_period;
+	uint32_t                       reserved;
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_resource_hfr_config - Resource HFR configuration
+ *
+ * @num_ports:                  Number of ports
+ * @reserved:                   Reserved for alignment
+ * @port_hfr_config:            HFR configuration for each IO port
+ */
+struct cam_isp_resource_hfr_config {
+	uint32_t                       num_ports;
+	uint32_t                       reserved;
+	struct cam_isp_port_hfr_config port_hfr_config[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_dual_split_params - dual isp split parameters
+ *
+ * @split_point:                Split point information x, where (0 < x < width)
+ *                              Left ISP's input ends at x + right padding and
+ *                              right ISP's input starts at x - left padding
+ * @right_padding:              Padding added past the split point for left
+ *                              ISP's input
+ * @left_padding:               Padding added before split point for right
+ *                              ISP's input
+ * @reserved:                   Reserved field for alignment
+ *
+ */
+struct cam_isp_dual_split_params {
+	uint32_t                       split_point;
+	uint32_t                       right_padding;
+	uint32_t                       left_padding;
+	uint32_t                       reserved;
+};
+
+/**
+ * struct cam_isp_dual_stripe_config - stripe config per bus client
+ *
+ * @offset:                     Start horizontal offset relative to
+ *                              output buffer
+ *                              In UBWC mode, this value indicates the H_INIT
+ *                              value in pixel
+ * @width:                      Width of the stripe in bytes
+ * @tileconfig:                 UBWC meta tile config. Contains the partial
+ *                              tile info
+ * @port_id:                    port id of ISP output
+ *
+ */
+struct cam_isp_dual_stripe_config {
+	uint32_t                       offset;
+	uint32_t                       width;
+	uint32_t                       tileconfig;
+	uint32_t                       port_id;
+};
+
+/**
+ * struct cam_isp_dual_config - dual isp configuration
+ *
+ * @num_ports:                  Number of isp output ports
+ * @reserved:                   Reserved field for alignment
+ * @split_params:               Input split parameters
+ * @stripes:                    Stripe information
+ *
+ */
+struct cam_isp_dual_config {
+	uint32_t                           num_ports;
+	uint32_t                           reserved;
+	struct cam_isp_dual_split_params   split_params;
+	struct cam_isp_dual_stripe_config  stripes[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_clock_config - Clock configuration
+ *
+ * @usage_type:                 Usage type (Single/Dual)
+ * @num_rdi:                    Number of RDI votes
+ * @left_pix_hz:                Pixel Clock for Left ISP
+ * @right_pix_hz:               Pixel Clock for Right ISP, valid only if Dual
+ * @rdi_hz:                     RDI Clock. ISP clock will be the max of the
+ *                              RDI and PIX clocks. For a particular context,
+ *                              UMD does not know which ISP HW the RDI is
+ *                              allocated to; hence pass the clock and let
+ *                              KMD decide.
+ */
+struct cam_isp_clock_config {
+	uint32_t                       usage_type;
+	uint32_t                       num_rdi;
+	uint64_t                       left_pix_hz;
+	uint64_t                       right_pix_hz;
+	uint64_t                       rdi_hz[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_csid_clock_config - CSID clock configuration
+ *
+ * @csid_clock:                 CSID clock
+ */
+struct cam_isp_csid_clock_config {
+	uint64_t                       csid_clock;
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_bw_vote - Bandwidth vote information
+ *
+ * @resource_id:                Resource ID
+ * @reserved:                   Reserved field for alignment
+ * @cam_bw_bps:                 Bandwidth vote for CAMNOC
+ * @ext_bw_bps:                 Bandwidth vote for path-to-DDR after CAMNOC
+ */
+
+struct cam_isp_bw_vote {
+	uint32_t                       resource_id;
+	uint32_t                       reserved;
+	uint64_t                       cam_bw_bps;
+	uint64_t                       ext_bw_bps;
+} __attribute__((packed));
+
+/**
+ * struct cam_isp_bw_config - Bandwidth configuration
+ *
+ * @usage_type:                 Usage type (Single/Dual)
+ * @num_rdi:                    Number of RDI votes
+ * @left_pix_vote:              Bandwidth vote for left ISP
+ * @right_pix_vote:             Bandwidth vote for right ISP
+ * @rdi_vote:                   RDI bandwidth requirements
+ */
+
+struct cam_isp_bw_config {
+	uint32_t                       usage_type;
+	uint32_t                       num_rdi;
+	struct cam_isp_bw_vote         left_pix_vote;
+	struct cam_isp_bw_vote         right_pix_vote;
+	struct cam_isp_bw_vote         rdi_vote[1];
+} __attribute__((packed));
+
+
+/* Acquire Device/HW v2 */
+
+/**
+ * struct cam_isp_acquire_hw_info - ISP acquire HW params
+ *
+ * @common_info_version  : Version of common info struct used
+ * @common_info_size     : Size of common info struct used
+ * @common_info_offset   : Offset of common info from start of data
+ * @num_inputs           : Number of inputs
+ * @input_info_version   : Version of input info struct used
+ * @input_info_size      : Size of input info struct used
+ * @input_info_offset    : Offset of input info from start of data
+ * @data                 : Start of data region
+ */
+struct cam_isp_acquire_hw_info {
+	uint16_t                common_info_version;
+	uint16_t                common_info_size;
+	uint32_t                common_info_offset;
+	uint32_t                num_inputs;
+	uint32_t                input_info_version;
+	uint32_t                input_info_size;
+	uint32_t                input_info_offset;
+	uint64_t                data;
+};
+
+#define CAM_ISP_ACQUIRE_COMMON_VER0         0x1000
+
+#define CAM_ISP_ACQUIRE_COMMON_SIZE_VER0    0x0
+
+#define CAM_ISP_ACQUIRE_INPUT_VER0          0x2000
+
+#define CAM_ISP_ACQUIRE_INPUT_SIZE_VER0     sizeof(struct cam_isp_in_port_info)
+
+#define CAM_ISP_ACQUIRE_OUT_VER0            0x3000
+
+#define CAM_ISP_ACQUIRE_OUT_SIZE_VER0       sizeof(struct cam_isp_out_port_info)
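+
+/*
+ * Layout sketch (editor's illustration) for the v2 acquire payload
+ * above, with no common info (VER0 common size is 0) and a single VER0
+ * in-port starting right at the data region:
+ *
+ *	struct cam_isp_acquire_hw_info *info = ...;
+ *
+ *	info->common_info_version = CAM_ISP_ACQUIRE_COMMON_VER0;
+ *	info->common_info_size    = CAM_ISP_ACQUIRE_COMMON_SIZE_VER0;
+ *	info->common_info_offset  = 0;
+ *	info->num_inputs          = 1;
+ *	info->input_info_version  = CAM_ISP_ACQUIRE_INPUT_VER0;
+ *	info->input_info_size     = CAM_ISP_ACQUIRE_INPUT_SIZE_VER0;
+ *	info->input_info_offset   = 0;	// in-port info begins at &info->data
+ */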
+
+#endif /* __UAPI_CAM_ISP_H__ */
diff --git a/include/uapi/media/cam_isp_ife.h b/include/uapi/media/cam_isp_ife.h
new file mode 100644
index 0000000..54890e6
--- /dev/null
+++ b/include/uapi/media/cam_isp_ife.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_ISP_IFE_H__
+#define __UAPI_CAM_ISP_IFE_H__
+
+/* IFE output port resource type (globally unique) */
+#define CAM_ISP_IFE_OUT_RES_BASE               0x3000
+
+#define CAM_ISP_IFE_OUT_RES_FULL               (CAM_ISP_IFE_OUT_RES_BASE + 0)
+#define CAM_ISP_IFE_OUT_RES_DS4                (CAM_ISP_IFE_OUT_RES_BASE + 1)
+#define CAM_ISP_IFE_OUT_RES_DS16               (CAM_ISP_IFE_OUT_RES_BASE + 2)
+#define CAM_ISP_IFE_OUT_RES_RAW_DUMP           (CAM_ISP_IFE_OUT_RES_BASE + 3)
+#define CAM_ISP_IFE_OUT_RES_FD                 (CAM_ISP_IFE_OUT_RES_BASE + 4)
+#define CAM_ISP_IFE_OUT_RES_PDAF               (CAM_ISP_IFE_OUT_RES_BASE + 5)
+#define CAM_ISP_IFE_OUT_RES_RDI_0              (CAM_ISP_IFE_OUT_RES_BASE + 6)
+#define CAM_ISP_IFE_OUT_RES_RDI_1              (CAM_ISP_IFE_OUT_RES_BASE + 7)
+#define CAM_ISP_IFE_OUT_RES_RDI_2              (CAM_ISP_IFE_OUT_RES_BASE + 8)
+#define CAM_ISP_IFE_OUT_RES_RDI_3              (CAM_ISP_IFE_OUT_RES_BASE + 9)
+#define CAM_ISP_IFE_OUT_RES_STATS_HDR_BE       (CAM_ISP_IFE_OUT_RES_BASE + 10)
+#define CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST    (CAM_ISP_IFE_OUT_RES_BASE + 11)
+#define CAM_ISP_IFE_OUT_RES_STATS_TL_BG        (CAM_ISP_IFE_OUT_RES_BASE + 12)
+#define CAM_ISP_IFE_OUT_RES_STATS_BF           (CAM_ISP_IFE_OUT_RES_BASE + 13)
+#define CAM_ISP_IFE_OUT_RES_STATS_AWB_BG       (CAM_ISP_IFE_OUT_RES_BASE + 14)
+#define CAM_ISP_IFE_OUT_RES_STATS_BHIST        (CAM_ISP_IFE_OUT_RES_BASE + 15)
+#define CAM_ISP_IFE_OUT_RES_STATS_RS           (CAM_ISP_IFE_OUT_RES_BASE + 16)
+#define CAM_ISP_IFE_OUT_RES_STATS_CS           (CAM_ISP_IFE_OUT_RES_BASE + 17)
+#define CAM_ISP_IFE_OUT_RES_STATS_IHIST        (CAM_ISP_IFE_OUT_RES_BASE + 18)
+#define CAM_ISP_IFE_OUT_RES_FULL_DISP          (CAM_ISP_IFE_OUT_RES_BASE + 19)
+#define CAM_ISP_IFE_OUT_RES_DS4_DISP           (CAM_ISP_IFE_OUT_RES_BASE + 20)
+#define CAM_ISP_IFE_OUT_RES_DS16_DISP          (CAM_ISP_IFE_OUT_RES_BASE + 21)
+#define CAM_ISP_IFE_OUT_RES_2PD                (CAM_ISP_IFE_OUT_RES_BASE + 22)
+
+#define CAM_ISP_IFE_OUT_RES_MAX                (CAM_ISP_IFE_OUT_RES_BASE + 23)
+
+
+/* IFE input port resource type (globally unique) */
+#define CAM_ISP_IFE_IN_RES_BASE                 0x4000
+
+#define CAM_ISP_IFE_IN_RES_TPG                 (CAM_ISP_IFE_IN_RES_BASE + 0)
+#define CAM_ISP_IFE_IN_RES_PHY_0               (CAM_ISP_IFE_IN_RES_BASE + 1)
+#define CAM_ISP_IFE_IN_RES_PHY_1               (CAM_ISP_IFE_IN_RES_BASE + 2)
+#define CAM_ISP_IFE_IN_RES_PHY_2               (CAM_ISP_IFE_IN_RES_BASE + 3)
+#define CAM_ISP_IFE_IN_RES_PHY_3               (CAM_ISP_IFE_IN_RES_BASE + 4)
+#define CAM_ISP_IFE_IN_RES_MAX                 (CAM_ISP_IFE_IN_RES_BASE + 5)
+
+#endif /* __UAPI_CAM_ISP_IFE_H__ */
diff --git a/include/uapi/media/cam_isp_vfe.h b/include/uapi/media/cam_isp_vfe.h
new file mode 100644
index 0000000..bcda475
--- /dev/null
+++ b/include/uapi/media/cam_isp_vfe.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_ISP_VFE_H__
+#define __UAPI_CAM_ISP_VFE_H__
+
+/* VFE output port resource type (globally unique) */
+#define CAM_ISP_VFE_OUT_RES_BASE               0x1000
+
+#define CAM_ISP_VFE_OUT_RES_ENC                (CAM_ISP_VFE_OUT_RES_BASE + 0)
+#define CAM_ISP_VFE_OUT_RES_VIEW               (CAM_ISP_VFE_OUT_RES_BASE + 1)
+#define CAM_ISP_VFE_OUT_RES_VID                (CAM_ISP_VFE_OUT_RES_BASE + 2)
+#define CAM_ISP_VFE_OUT_RES_RDI_0              (CAM_ISP_VFE_OUT_RES_BASE + 3)
+#define CAM_ISP_VFE_OUT_RES_RDI_1              (CAM_ISP_VFE_OUT_RES_BASE + 4)
+#define CAM_ISP_VFE_OUT_RES_RDI_2              (CAM_ISP_VFE_OUT_RES_BASE + 5)
+#define CAM_ISP_VFE_OUT_RES_RDI_3              (CAM_ISP_VFE_OUT_RES_BASE + 6)
+#define CAM_ISP_VFE_OUT_RES_STATS_AEC          (CAM_ISP_VFE_OUT_RES_BASE + 7)
+#define CAM_ISP_VFE_OUT_RES_STATS_AF           (CAM_ISP_VFE_OUT_RES_BASE + 8)
+#define CAM_ISP_VFE_OUT_RES_STATS_AWB          (CAM_ISP_VFE_OUT_RES_BASE + 9)
+#define CAM_ISP_VFE_OUT_RES_STATS_RS           (CAM_ISP_VFE_OUT_RES_BASE + 10)
+#define CAM_ISP_VFE_OUT_RES_STATS_CS           (CAM_ISP_VFE_OUT_RES_BASE + 11)
+#define CAM_ISP_VFE_OUT_RES_STATS_IHIST        (CAM_ISP_VFE_OUT_RES_BASE + 12)
+#define CAM_ISP_VFE_OUT_RES_STATS_SKIN         (CAM_ISP_VFE_OUT_RES_BASE + 13)
+#define CAM_ISP_VFE_OUT_RES_STATS_BG           (CAM_ISP_VFE_OUT_RES_BASE + 14)
+#define CAM_ISP_VFE_OUT_RES_STATS_BF           (CAM_ISP_VFE_OUT_RES_BASE + 15)
+#define CAM_ISP_VFE_OUT_RES_STATS_BE           (CAM_ISP_VFE_OUT_RES_BASE + 16)
+#define CAM_ISP_VFE_OUT_RES_STATS_BHIST        (CAM_ISP_VFE_OUT_RES_BASE + 17)
+#define CAM_ISP_VFE_OUT_RES_STATS_BF_SCALE     (CAM_ISP_VFE_OUT_RES_BASE + 18)
+#define CAM_ISP_VFE_OUT_RES_STATS_HDR_BE       (CAM_ISP_VFE_OUT_RES_BASE + 19)
+#define CAM_ISP_VFE_OUT_RES_STATS_HDR_BHIST    (CAM_ISP_VFE_OUT_RES_BASE + 20)
+#define CAM_ISP_VFE_OUT_RES_STATS_AEC_BG       (CAM_ISP_VFE_OUT_RES_BASE + 21)
+#define CAM_ISP_VFE_OUT_RES_CAMIF_RAW          (CAM_ISP_VFE_OUT_RES_BASE + 22)
+#define CAM_ISP_VFE_OUT_RES_IDEAL_RAW          (CAM_ISP_VFE_OUT_RES_BASE + 23)
+#define CAM_ISP_VFE_OUT_RES_MAX                (CAM_ISP_VFE_OUT_RES_BASE + 24)
+
+/* VFE input port resource type (globally unique) */
+#define CAM_ISP_VFE_IN_RES_BASE                0x2000
+
+#define CAM_ISP_VFE_IN_RES_TPG                 (CAM_ISP_VFE_IN_RES_BASE + 0)
+#define CAM_ISP_VFE_IN_RES_PHY_0               (CAM_ISP_VFE_IN_RES_BASE + 1)
+#define CAM_ISP_VFE_IN_RES_PHY_1               (CAM_ISP_VFE_IN_RES_BASE + 2)
+#define CAM_ISP_VFE_IN_RES_PHY_2               (CAM_ISP_VFE_IN_RES_BASE + 3)
+#define CAM_ISP_VFE_IN_RES_PHY_3               (CAM_ISP_VFE_IN_RES_BASE + 4)
+#define CAM_ISP_VFE_IN_RES_FE                  (CAM_ISP_VFE_IN_RES_BASE + 5)
+#define CAM_ISP_VFE_IN_RES_MAX                 (CAM_ISP_VFE_IN_RES_BASE + 6)
+
+#endif /* __UAPI_CAM_ISP_VFE_H__ */
diff --git a/include/uapi/media/cam_jpeg.h b/include/uapi/media/cam_jpeg.h
new file mode 100644
index 0000000..defaf69
--- /dev/null
+++ b/include/uapi/media/cam_jpeg.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_JPEG_H__
+#define __UAPI_CAM_JPEG_H__
+
+#include "cam_defs.h"
+
+/* enc, dma, cdm(enc/dma) are used in querycap */
+#define CAM_JPEG_DEV_TYPE_ENC      0
+#define CAM_JPEG_DEV_TYPE_DMA      1
+#define CAM_JPEG_DEV_TYPE_MAX      2
+
+#define CAM_JPEG_NUM_DEV_PER_RES_MAX      1
+
+/* definitions needed for jpeg acquire device */
+#define CAM_JPEG_RES_TYPE_ENC        0
+#define CAM_JPEG_RES_TYPE_DMA        1
+#define CAM_JPEG_RES_TYPE_MAX        2
+
+/* packet opcode types */
+#define CAM_JPEG_OPCODE_ENC_UPDATE 0
+#define CAM_JPEG_OPCODE_DMA_UPDATE 1
+
+/* ENC input port resource type */
+#define CAM_JPEG_ENC_INPUT_IMAGE                 0x0
+
+/* ENC output port resource type */
+#define CAM_JPEG_ENC_OUTPUT_IMAGE                0x1
+
+#define CAM_JPEG_ENC_IO_IMAGES_MAX               0x2
+
+/* DMA input port resource type */
+#define CAM_JPEG_DMA_INPUT_IMAGE                 0x0
+
+/* DMA output port resource type */
+#define CAM_JPEG_DMA_OUTPUT_IMAGE                0x1
+
+#define CAM_JPEG_DMA_IO_IMAGES_MAX               0x2
+
+#define CAM_JPEG_IMAGE_MAX                       0x2
+
+/**
+ * struct cam_jpeg_dev_ver - Device information for a particular hw type
+ *
+ * This is used to get device version info of JPEG ENC, JPEG DMA
+ * from hardware and use this info in CAM_QUERY_CAP IOCTL
+ *
+ * @size : Size of struct passed
+ * @dev_type: Hardware type for the cap info(jpeg enc, jpeg dma)
+ * @hw_ver: Major, minor and incr values of a device version
+ */
+struct cam_jpeg_dev_ver {
+	uint32_t size;
+	uint32_t dev_type;
+	struct cam_hw_version hw_ver;
+};
+
+/**
+ * struct cam_jpeg_query_cap_cmd - JPEG query device capability payload
+ *
+ * @dev_iommu_handle: Jpeg iommu handles for secure/non secure
+ *      modes
+ * @cdm_iommu_handle: Iommu handles for secure/non secure modes
+ * @num_enc: Number of encoder
+ * @num_dma: Number of dma
+ * @dev_ver: Returned device capability array
+ */
+struct cam_jpeg_query_cap_cmd {
+	struct cam_iommu_handle dev_iommu_handle;
+	struct cam_iommu_handle cdm_iommu_handle;
+	uint32_t num_enc;
+	uint32_t num_dma;
+	struct cam_jpeg_dev_ver dev_ver[CAM_JPEG_DEV_TYPE_MAX];
+};
+
+/**
+ * struct cam_jpeg_res_info - JPEG output resource info
+ *
+ * @format: Format of the resource
+ * @width:  Width in pixels
+ * @height: Height in lines
+ * @fps:  Fps
+ */
+struct cam_jpeg_res_info {
+	uint32_t format;
+	uint32_t width;
+	uint32_t height;
+	uint32_t fps;
+};
+
+/**
+ * struct cam_jpeg_acquire_dev_info - A JPEG device info
+ *
+ * @dev_type: Device type (ENC/DMA)
+ * @reserved: Reserved Bytes
+ * @in_res: Input resource info
+ * @out_res: Output resource info
+ */
+struct cam_jpeg_acquire_dev_info {
+	uint32_t dev_type;
+	uint32_t reserved;
+	struct cam_jpeg_res_info in_res;
+	struct cam_jpeg_res_info out_res;
+};
+
+/**
+ * struct cam_jpeg_config_inout_param_info - JPEG Config time
+ *     input output params
+ *
+ * @clk_index: Input Param - clock selection index (-1 for default)
+ * @output_size: Output Param - jpeg encode/dma output size in
+ *     bytes
+ */
+struct cam_jpeg_config_inout_param_info {
+	int32_t clk_index;
+	int32_t output_size;
+};
+
+#endif /* __UAPI_CAM_JPEG_H__ */
diff --git a/include/uapi/media/cam_lrme.h b/include/uapi/media/cam_lrme.h
new file mode 100644
index 0000000..9e11916
--- /dev/null
+++ b/include/uapi/media/cam_lrme.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_LRME_H__
+#define __UAPI_CAM_LRME_H__
+
+#include "cam_defs.h"
+
+/* LRME Resource Types */
+
+enum CAM_LRME_IO_TYPE {
+	CAM_LRME_IO_TYPE_TAR,
+	CAM_LRME_IO_TYPE_REF,
+	CAM_LRME_IO_TYPE_RES,
+	CAM_LRME_IO_TYPE_DS2,
+};
+
+#define CAM_LRME_INPUT_PORT_TYPE_TAR (1 << 0)
+#define CAM_LRME_INPUT_PORT_TYPE_REF (1 << 1)
+
+#define CAM_LRME_OUTPUT_PORT_TYPE_DS2 (1 << 0)
+#define CAM_LRME_OUTPUT_PORT_TYPE_RES (1 << 1)
+
+#define CAM_LRME_DEV_MAX 1
+
+
+struct cam_lrme_hw_version {
+	uint32_t gen;
+	uint32_t rev;
+	uint32_t step;
+};
+
+struct cam_lrme_dev_cap {
+	struct cam_lrme_hw_version clc_hw_version;
+	struct cam_lrme_hw_version bus_rd_hw_version;
+	struct cam_lrme_hw_version bus_wr_hw_version;
+	struct cam_lrme_hw_version top_hw_version;
+	struct cam_lrme_hw_version top_titan_version;
+};
+
+/**
+ * struct cam_lrme_query_cap_cmd - LRME query device capability payload
+ *
+ * @device_iommu: LRME iommu handles for secure/non secure
+ *      modes
+ * @cdm_iommu: Iommu handles for secure/non secure modes
+ * @num_devices: number of hardware devices
+ * @dev_caps: Returned device capability array
+ */
+struct cam_lrme_query_cap_cmd {
+	struct cam_iommu_handle device_iommu;
+	struct cam_iommu_handle cdm_iommu;
+	uint32_t num_devices;
+	struct cam_lrme_dev_cap dev_caps[CAM_LRME_DEV_MAX];
+};
+
+struct cam_lrme_soc_info {
+	uint64_t clock_rate;
+	uint64_t bandwidth;
+	uint64_t reserved[4];
+};
+
+struct cam_lrme_acquire_args {
+	struct cam_lrme_soc_info lrme_soc_info;
+};
+
+#endif /* __UAPI_CAM_LRME_H__ */
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
new file mode 100644
index 0000000..13a9d23
--- /dev/null
+++ b/include/uapi/media/cam_req_mgr.h
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_LINUX_CAM_REQ_MGR_H
+#define __UAPI_LINUX_CAM_REQ_MGR_H
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/media.h>
+#include <media/cam_defs.h>
+
+#define CAM_REQ_MGR_VNODE_NAME "cam-req-mgr-devnode"
+
+#define CAM_DEVICE_TYPE_BASE      (MEDIA_ENT_F_OLD_BASE)
+#define CAM_VNODE_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE)
+#define CAM_SENSOR_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 1)
+#define CAM_IFE_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 2)
+#define CAM_ICP_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 3)
+#define CAM_LRME_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 4)
+#define CAM_JPEG_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 5)
+#define CAM_FD_DEVICE_TYPE        (CAM_DEVICE_TYPE_BASE + 6)
+#define CAM_CPAS_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 7)
+#define CAM_CSIPHY_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 8)
+#define CAM_ACTUATOR_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 9)
+#define CAM_CCI_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 10)
+#define CAM_FLASH_DEVICE_TYPE     (CAM_DEVICE_TYPE_BASE + 11)
+#define CAM_EEPROM_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 12)
+#define CAM_OIS_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 13)
+
+/* cam_req_mgr hdl info */
+#define CAM_REQ_MGR_HDL_IDX_POS           8
+#define CAM_REQ_MGR_HDL_IDX_MASK          ((1 << CAM_REQ_MGR_HDL_IDX_POS) - 1)
+#define CAM_REQ_MGR_GET_HDL_IDX(hdl)      (hdl & CAM_REQ_MGR_HDL_IDX_MASK)
+
+/**
+ * Max handles supported by cam_req_mgr
+ * It includes both session and device handles
+ */
+#define CAM_REQ_MGR_MAX_HANDLES           64
+#define MAX_LINKS_PER_SESSION             2
+
+/* V4L event type which user space will subscribe to */
+#define V4L_EVENT_CAM_REQ_MGR_EVENT       (V4L2_EVENT_PRIVATE_START + 0)
+
+/* Specific event ids to get notified in user space */
+#define V4L_EVENT_CAM_REQ_MGR_SOF            0
+#define V4L_EVENT_CAM_REQ_MGR_ERROR          1
+#define V4L_EVENT_CAM_REQ_MGR_SOF_BOOT_TS    2
+
+/* SOF Event status */
+#define CAM_REQ_MGR_SOF_EVENT_SUCCESS           0
+#define CAM_REQ_MGR_SOF_EVENT_ERROR             1
+
+/* Link control operations */
+#define CAM_REQ_MGR_LINK_ACTIVATE               0
+#define CAM_REQ_MGR_LINK_DEACTIVATE             1
+
+/**
+ * Request Manager : flush_type
+ * @CAM_REQ_MGR_FLUSH_TYPE_ALL: Req mgr will remove all the pending
+ * requests from input/processing queue.
+ * @CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ: Req mgr will remove only particular
+ * request id from input/processing queue.
+ * @CAM_REQ_MGR_FLUSH_TYPE_MAX: Max number of the flush type
+ * @opcode: CAM_REQ_MGR_FLUSH_REQ
+ */
+#define CAM_REQ_MGR_FLUSH_TYPE_ALL          0
+#define CAM_REQ_MGR_FLUSH_TYPE_CANCEL_REQ   1
+#define CAM_REQ_MGR_FLUSH_TYPE_MAX          2
+
+/**
+ * Request Manager : Sync Mode type
+ * @CAM_REQ_MGR_SYNC_MODE_NO_SYNC: Req mgr will apply non-sync mode for this
+ * request.
+ * @CAM_REQ_MGR_SYNC_MODE_SYNC: Req mgr will apply sync mode for this request.
+ */
+#define CAM_REQ_MGR_SYNC_MODE_NO_SYNC   0
+#define CAM_REQ_MGR_SYNC_MODE_SYNC      1
+
+/**
+ * struct cam_req_mgr_event_data
+ * @session_hdl: session handle
+ * @link_hdl: link handle
+ * @frame_id: frame id
+ * @reserved: reserved for 64 bit alignment
+ * @req_id: request id
+ * @tv_sec: timestamp in seconds
+ * @tv_usec: timestamp in micro seconds
+ */
+struct cam_req_mgr_event_data {
+	int32_t   session_hdl;
+	int32_t   link_hdl;
+	int32_t   frame_id;
+	int32_t   reserved;
+	int64_t   req_id;
+	uint64_t  tv_sec;
+	uint64_t  tv_usec;
+};
+
+/**
+ * struct cam_req_mgr_session_info
+ * @session_hdl: In/Output param - session_handle
+ * @opcode1: CAM_REQ_MGR_CREATE_SESSION
+ * @opcode2: CAM_REQ_MGR_DESTROY_SESSION
+ */
+struct cam_req_mgr_session_info {
+	int32_t session_hdl;
+	int32_t reserved;
+};
+
+/**
+ * struct cam_req_mgr_link_info
+ * @session_hdl: Input param - Identifier for CSL session
+ * @num_devices: Input Param - Num of devices to be linked
+ * @dev_hdls: Input param - List of device handles to be linked
+ * @link_hdl: Output Param -Identifier for link
+ * @opcode: CAM_REQ_MGR_LINK
+ */
+struct cam_req_mgr_link_info {
+	int32_t session_hdl;
+	uint32_t num_devices;
+	int32_t dev_hdls[CAM_REQ_MGR_MAX_HANDLES];
+	int32_t link_hdl;
+};
+
+/**
+ * struct cam_req_mgr_unlink_info
+ * @session_hdl: input param - session handle
+ * @link_hdl: input param - link handle
+ * @opcode: CAM_REQ_MGR_UNLINK
+ */
+struct cam_req_mgr_unlink_info {
+	int32_t session_hdl;
+	int32_t link_hdl;
+};
+
+/**
+ * struct cam_req_mgr_flush_info
+ * @brief: User can tell drivers to flush a particular request id or
+ * flush all requests from its pending processing queue. Flush is a
+ * blocking call and driver shall ensure all requests are flushed
+ * before returning.
+ * @session_hdl: Input param - Identifier for CSL session
+ * @link_hdl: Input Param -Identifier for link
+ * @flush_type: User can cancel a particular req id or can flush
+ * all requests in queue
+ * @reserved: reserved for 64 bit alignment
+ * @req_id: field is valid only if flush type is cancel request
+ * for flush all this field value is not considered.
+ * @opcode: CAM_REQ_MGR_FLUSH_REQ
+ */
+struct cam_req_mgr_flush_info {
+	int32_t session_hdl;
+	int32_t link_hdl;
+	uint32_t flush_type;
+	uint32_t reserved;
+	int64_t req_id;
+};
+
+/**
+ * struct cam_req_mgr_sched_request
+ * @session_hdl: Input param - Identifier for CSL session
+ * @link_hdl: Input Param - Identifier for link
+ * @bubble_enable: Input Param - Cam req mgr will do bubble recovery if this
+ * flag is set.
+ * @sync_mode: Type of Sync mode for this request
+ * @req_id: Input Param - Request Id to be scheduled
+ */
+struct cam_req_mgr_sched_request {
+	int32_t session_hdl;
+	int32_t link_hdl;
+	int32_t bubble_enable;
+	int32_t sync_mode;
+	int64_t req_id;
+};
+
+/**
+ * struct cam_req_mgr_sync_mode
+ * @session_hdl:         Input param - Identifier for CSL session
+ * @sync_mode:           Input Param - Type of sync mode
+ * @num_links:           Input Param - Num of links in sync mode (Valid only
+ *                             when sync_mode is one of SYNC enabled modes)
+ * @link_hdls:           Input Param - Array of link handles to be in sync mode
+ *                             (Valid only when sync_mode is one of SYNC
+ *                             enabled modes)
+ * @master_link_hdl:     Input Param - To dictate which link's SOF drives system
+ *                             (Valid only when sync_mode is one of SYNC
+ *                             enabled modes)
+ *
+ * @opcode: CAM_REQ_MGR_SYNC_MODE
+ */
+struct cam_req_mgr_sync_mode {
+	int32_t session_hdl;
+	int32_t sync_mode;
+	int32_t num_links;
+	int32_t link_hdls[MAX_LINKS_PER_SESSION];
+	int32_t master_link_hdl;
+	int32_t reserved;
+};
+
+/**
+ * struct cam_req_mgr_link_control
+ * @ops:                 Link operations: activate/deactivate
+ * @session_hdl:         Input param - Identifier for CSL session
+ * @num_links:           Input Param - Num of links
+ * @reserved:            reserved field
+ * @link_hdls:           Input Param - Links to be activated/deactivated
+ *
+ * @opcode: CAM_REQ_MGR_LINK_CONTROL
+ */
+struct cam_req_mgr_link_control {
+	int32_t ops;
+	int32_t session_hdl;
+	int32_t num_links;
+	int32_t reserved;
+	int32_t link_hdls[MAX_LINKS_PER_SESSION];
+};
+
+/**
+ * cam_req_mgr specific opcode ids
+ */
+#define CAM_REQ_MGR_CREATE_DEV_NODES            (CAM_COMMON_OPCODE_MAX + 1)
+#define CAM_REQ_MGR_CREATE_SESSION              (CAM_COMMON_OPCODE_MAX + 2)
+#define CAM_REQ_MGR_DESTROY_SESSION             (CAM_COMMON_OPCODE_MAX + 3)
+#define CAM_REQ_MGR_LINK                        (CAM_COMMON_OPCODE_MAX + 4)
+#define CAM_REQ_MGR_UNLINK                      (CAM_COMMON_OPCODE_MAX + 5)
+#define CAM_REQ_MGR_SCHED_REQ                   (CAM_COMMON_OPCODE_MAX + 6)
+#define CAM_REQ_MGR_FLUSH_REQ                   (CAM_COMMON_OPCODE_MAX + 7)
+#define CAM_REQ_MGR_SYNC_MODE                   (CAM_COMMON_OPCODE_MAX + 8)
+#define CAM_REQ_MGR_ALLOC_BUF                   (CAM_COMMON_OPCODE_MAX + 9)
+#define CAM_REQ_MGR_MAP_BUF                     (CAM_COMMON_OPCODE_MAX + 10)
+#define CAM_REQ_MGR_RELEASE_BUF                 (CAM_COMMON_OPCODE_MAX + 11)
+#define CAM_REQ_MGR_CACHE_OPS                   (CAM_COMMON_OPCODE_MAX + 12)
+#define CAM_REQ_MGR_LINK_CONTROL                (CAM_COMMON_OPCODE_MAX + 13)
+/* end of cam_req_mgr opcodes */
+
+#define CAM_MEM_FLAG_HW_READ_WRITE              (1<<0)
+#define CAM_MEM_FLAG_HW_READ_ONLY               (1<<1)
+#define CAM_MEM_FLAG_HW_WRITE_ONLY              (1<<2)
+#define CAM_MEM_FLAG_KMD_ACCESS                 (1<<3)
+#define CAM_MEM_FLAG_UMD_ACCESS                 (1<<4)
+#define CAM_MEM_FLAG_PROTECTED_MODE             (1<<5)
+#define CAM_MEM_FLAG_CMD_BUF_TYPE               (1<<6)
+#define CAM_MEM_FLAG_PIXEL_BUF_TYPE             (1<<7)
+#define CAM_MEM_FLAG_STATS_BUF_TYPE             (1<<8)
+#define CAM_MEM_FLAG_PACKET_BUF_TYPE            (1<<9)
+#define CAM_MEM_FLAG_CACHE                      (1<<10)
+#define CAM_MEM_FLAG_HW_SHARED_ACCESS           (1<<11)
+
+#define CAM_MEM_MMU_MAX_HANDLE                  16
+
+/* Maximum allowed buffers in existence */
+#define CAM_MEM_BUFQ_MAX                        1024
+
+#define CAM_MEM_MGR_SECURE_BIT_POS              15
+#define CAM_MEM_MGR_HDL_IDX_SIZE                15
+#define CAM_MEM_MGR_HDL_FD_SIZE                 16
+#define CAM_MEM_MGR_HDL_IDX_END_POS             16
+#define CAM_MEM_MGR_HDL_FD_END_POS              32
+
+#define CAM_MEM_MGR_HDL_IDX_MASK      ((1 << CAM_MEM_MGR_HDL_IDX_SIZE) - 1)
+
+#define GET_MEM_HANDLE(idx, fd) \
+	((idx & CAM_MEM_MGR_HDL_IDX_MASK) | \
+	(fd << (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE))) \
+
+#define GET_FD_FROM_HANDLE(hdl) \
+	(hdl >> (CAM_MEM_MGR_HDL_FD_END_POS - CAM_MEM_MGR_HDL_FD_SIZE)) \
+
+#define CAM_MEM_MGR_GET_HDL_IDX(hdl) (hdl & CAM_MEM_MGR_HDL_IDX_MASK)
+
+#define CAM_MEM_MGR_SET_SECURE_HDL(hdl, flag) \
+	((flag) ? (hdl |= (1 << CAM_MEM_MGR_SECURE_BIT_POS)) : \
+	((hdl) &= ~(1 << CAM_MEM_MGR_SECURE_BIT_POS)))
+
+#define CAM_MEM_MGR_IS_SECURE_HDL(hdl) \
+	(((hdl) & \
+	(1<<CAM_MEM_MGR_SECURE_BIT_POS)) >> CAM_MEM_MGR_SECURE_BIT_POS)
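+
+/*
+ * Round-trip sketch (editor's illustration) through the handle helpers
+ * above; "idx" and "fd" are hypothetical:
+ *
+ *	int32_t hdl = GET_MEM_HANDLE(idx, fd);
+ *
+ *	CAM_MEM_MGR_SET_SECURE_HDL(hdl, 1);	// mark handle as secure
+ *	if (CAM_MEM_MGR_IS_SECURE_HDL(hdl))
+ *		...;
+ *	int32_t back_fd  = GET_FD_FROM_HANDLE(hdl);
+ *	int32_t back_idx = CAM_MEM_MGR_GET_HDL_IDX(hdl);
+ */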
+
+/**
+ * memory allocation type
+ */
+#define CAM_MEM_DMA_NONE                        0
+#define CAM_MEM_DMA_BIDIRECTIONAL               1
+#define CAM_MEM_DMA_TO_DEVICE                   2
+#define CAM_MEM_DMA_FROM_DEVICE                 3
+
+
+/**
+ * memory cache operation
+ */
+#define CAM_MEM_CLEAN_CACHE                     1
+#define CAM_MEM_INV_CACHE                       2
+#define CAM_MEM_CLEAN_INV_CACHE                 3
+
+
+/**
+ * struct cam_mem_alloc_out_params
+ * @buf_handle: buffer handle
+ * @fd: output buffer file descriptor
+ * @vaddr: virtual address pointer
+ */
+struct cam_mem_alloc_out_params {
+	uint32_t buf_handle;
+	int32_t fd;
+	uint64_t vaddr;
+};
+
+/**
+ * struct cam_mem_map_out_params
+ * @buf_handle: buffer handle
+ * @reserved: reserved for future
+ * @vaddr: virtual address pointer
+ */
+struct cam_mem_map_out_params {
+	uint32_t buf_handle;
+	uint32_t reserved;
+	uint64_t vaddr;
+};
+
+/**
+ * struct cam_mem_mgr_alloc_cmd
+ * @len: size of buffer to allocate
+ * @align: alignment of the buffer
+ * @mmu_hdls: array of mmu handles
+ * @num_hdl: number of handles
+ * @flags: flags of the buffer
+ * @out: out params
+ */
+/* CAM_REQ_MGR_ALLOC_BUF */
+struct cam_mem_mgr_alloc_cmd {
+	uint64_t len;
+	uint64_t align;
+	int32_t mmu_hdls[CAM_MEM_MMU_MAX_HANDLE];
+	uint32_t num_hdl;
+	uint32_t flags;
+	struct cam_mem_alloc_out_params out;
+};
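+
+/*
+ * Fill sketch (editor's illustration; "mmu_hdl" hypothetical) for a
+ * cached, userspace-accessible 1 MB allocation:
+ *
+ *	struct cam_mem_mgr_alloc_cmd alloc = {
+ *		.len      = 1 * 1024 * 1024,
+ *		.align    = 4096,
+ *		.mmu_hdls = { mmu_hdl },
+ *		.num_hdl  = 1,
+ *		.flags    = CAM_MEM_FLAG_HW_READ_WRITE |
+ *			    CAM_MEM_FLAG_UMD_ACCESS | CAM_MEM_FLAG_CACHE,
+ *	};
+ *	// on success, alloc.out.buf_handle / alloc.out.fd are populated
+ */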
+
+/**
+ * struct cam_mem_mgr_map_cmd
+ * @mmu_hdls: array of mmu handles
+ * @num_hdl: number of handles
+ * @flags: flags of the buffer
+ * @fd: output buffer file descriptor
+ * @reserved: reserved field
+ * @out: out params
+ */
+
+/* CAM_REQ_MGR_MAP_BUF */
+struct cam_mem_mgr_map_cmd {
+	int32_t mmu_hdls[CAM_MEM_MMU_MAX_HANDLE];
+	uint32_t num_hdl;
+	uint32_t flags;
+	int32_t fd;
+	uint32_t reserved;
+	struct cam_mem_map_out_params out;
+};
+
+/**
+ * struct cam_mem_mgr_release_cmd
+ * @buf_handle: buffer handle
+ * @reserved: reserved field
+ */
+/* CAM_REQ_MGR_RELEASE_BUF */
+struct cam_mem_mgr_release_cmd {
+	int32_t buf_handle;
+	uint32_t reserved;
+};
+
+/**
+ * struct cam_mem_cache_ops_cmd
+ * @buf_handle: buffer handle
+ * @mem_cache_ops: cache operations
+ */
+/* CAM_REQ_MGR_CACHE_OPS */
+struct cam_mem_cache_ops_cmd {
+	int32_t buf_handle;
+	uint32_t mem_cache_ops;
+};
+
+/**
+ * Request Manager : error message type
+ * @CAM_REQ_MGR_ERROR_TYPE_DEVICE: Device error message, fatal to session
+ * @CAM_REQ_MGR_ERROR_TYPE_REQUEST: Error on a single request, not fatal
+ * @CAM_REQ_MGR_ERROR_TYPE_BUFFER: Buffer was not filled, not fatal
+ */
+#define CAM_REQ_MGR_ERROR_TYPE_DEVICE           0
+#define CAM_REQ_MGR_ERROR_TYPE_REQUEST          1
+#define CAM_REQ_MGR_ERROR_TYPE_BUFFER           2
+
+/**
+ * struct cam_req_mgr_error_msg
+ * @error_type: type of error
+ * @request_id: request id of frame
+ * @device_hdl: device handle
+ * @link_hdl: link handle
+ * @resource_size: size of the resource
+ */
+struct cam_req_mgr_error_msg {
+	uint32_t error_type;
+	uint32_t request_id;
+	int32_t device_hdl;
+	int32_t link_hdl;
+	uint64_t resource_size;
+};
+
+/**
+ * struct cam_req_mgr_frame_msg
+ * @request_id: request id of the frame
+ * @frame_id: frame id of the frame
+ * @timestamp: timestamp of the frame
+ * @link_hdl: link handle associated with this message
+ * @sof_status: sof status success or fail
+ */
+struct cam_req_mgr_frame_msg {
+	uint64_t request_id;
+	uint64_t frame_id;
+	uint64_t timestamp;
+	int32_t  link_hdl;
+	uint32_t sof_status;
+};
+
+/**
+ * struct cam_req_mgr_message
+ * @session_hdl: session to which the frame belongs
+ * @reserved: reserved field
+ * @u: union which can either be error or frame message
+ */
+struct cam_req_mgr_message {
+	int32_t session_hdl;
+	int32_t reserved;
+	union {
+		struct cam_req_mgr_error_msg err_msg;
+		struct cam_req_mgr_frame_msg frame_msg;
+	} u;
+};
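+
+/*
+ * Dispatch sketch (editor's illustration; assumes the message is
+ * delivered in the payload of a v4l2_event dequeued from the req mgr
+ * devnode, and handle_*() are hypothetical):
+ *
+ *	struct cam_req_mgr_message *msg =
+ *		(struct cam_req_mgr_message *)ev.u.data;
+ *
+ *	if (ev.id == V4L_EVENT_CAM_REQ_MGR_ERROR)
+ *		handle_error(&msg->u.err_msg);
+ *	else
+ *		handle_frame(&msg->u.frame_msg);
+ */
+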
+#endif /* __UAPI_LINUX_CAM_REQ_MGR_H */
diff --git a/include/uapi/media/cam_sensor.h b/include/uapi/media/cam_sensor.h
new file mode 100644
index 0000000..7b0bb4d
--- /dev/null
+++ b/include/uapi/media/cam_sensor.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_SENSOR_H__
+#define __UAPI_CAM_SENSOR_H__
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <media/cam_defs.h>
+
+#define CAM_SENSOR_PROBE_CMD   (CAM_COMMON_OPCODE_MAX + 1)
+#define CAM_FLASH_MAX_LED_TRIGGERS 3
+#define MAX_OIS_NAME_SIZE 32
+#define CAM_CSIPHY_SECURE_MODE_ENABLED 1
+/**
+ * struct cam_sensor_query_cap - capabilities info for sensor
+ *
+ * @slot_info        :  Indicates the slot id or cell index
+ * @secure_camera    :  Camera is in secure/Non-secure mode
+ * @pos_pitch        :  Sensor position pitch
+ * @pos_roll         :  Sensor position roll
+ * @pos_yaw          :  Sensor position yaw
+ * @actuator_slot_id :  Actuator slot id connected to the sensor
+ * @eeprom_slot_id   :  EEPROM slot id connected to the sensor
+ * @ois_slot_id      :  OIS slot id connected to the sensor
+ * @flash_slot_id    :  Flash slot id connected to the sensor
+ * @csiphy_slot_id   :  CSIphy slot id connected to the sensor
+ *
+ */
+struct  cam_sensor_query_cap {
+	uint32_t        slot_info;
+	uint32_t        secure_camera;
+	uint32_t        pos_pitch;
+	uint32_t        pos_roll;
+	uint32_t        pos_yaw;
+	uint32_t        actuator_slot_id;
+	uint32_t        eeprom_slot_id;
+	uint32_t        ois_slot_id;
+	uint32_t        flash_slot_id;
+	uint32_t        csiphy_slot_id;
+} __attribute__((packed));
+
+/**
+ * struct cam_csiphy_query_cap - capabilities info for csiphy
+ *
+ * @slot_info        :  Indicates the slot id or cell index
+ * @version          :  CSIphy version
+ * @clk_lane         :  Of the 5 lanes, indicates which lane is
+ *                      configured as the clock lane
+ * @reserved
+ */
+struct cam_csiphy_query_cap {
+	uint32_t            slot_info;
+	uint32_t            version;
+	uint32_t            clk_lane;
+	uint32_t            reserved;
+} __attribute__((packed));
+
+/**
+ * struct cam_actuator_query_cap - capabilities info for actuator
+ *
+ * @slot_info        :  Indicates the slot id or cell index
+ * @reserved
+ */
+struct cam_actuator_query_cap {
+	uint32_t            slot_info;
+	uint32_t            reserved;
+} __attribute__((packed));
+
+/**
+ * struct cam_eeprom_query_cap_t - capabilities info for eeprom
+ *
+ * @slot_info                  :  Indicates the slot id or cell index
+ * @eeprom_kernel_probe        :  Indicates a kernel or userspace probe
+ * @is_multimodule_mode        :  Whether multimodule mode is enabled
+ */
+struct cam_eeprom_query_cap_t {
+	uint32_t            slot_info;
+	uint16_t            eeprom_kernel_probe;
+	uint16_t            is_multimodule_mode;
+} __attribute__((packed));
+
+/**
+ * struct cam_ois_query_cap_t - capabilities info for ois
+ *
+ * @slot_info                  :  Indicates the slot id or cell index
+ */
+struct cam_ois_query_cap_t {
+	uint32_t            slot_info;
+	uint16_t            reserved;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_i2c_info - Contains slave I2C related info
+ *
+ * @slave_addr      :    Slave address
+ * @i2c_freq_mode   :    I2C frequency mode (4 bits used)
+ * @cmd_type        :    Explains type of command
+ */
+struct cam_cmd_i2c_info {
+	uint32_t    slave_addr;
+	uint8_t     i2c_freq_mode;
+	uint8_t     cmd_type;
+	uint16_t    reserved;
+} __attribute__((packed));
+
+/**
+ * struct cam_ois_opcode - Contains OIS opcode
+ *
+ * @prog            :    OIS FW prog register address
+ * @coeff           :    OIS FW coeff register address
+ * @pheripheral     :    OIS peripheral
+ * @memory          :    OIS memory
+ */
+struct cam_ois_opcode {
+	uint32_t prog;
+	uint32_t coeff;
+	uint32_t pheripheral;
+	uint32_t memory;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_ois_info - Contains OIS slave info
+ *
+ * @slave_addr            :    OIS i2c slave address
+ * @i2c_freq_mode         :    i2c frequency mode
+ * @cmd_type              :    Explains type of command
+ * @ois_fw_flag           :    indicates whether OIS FW is present
+ * @is_ois_calib          :    indicates whether calibration data is available
+ * @ois_name              :    OIS name
+ * @opcode                :    opcode
+ */
+struct cam_cmd_ois_info {
+	uint16_t              slave_addr;
+	uint8_t               i2c_freq_mode;
+	uint8_t               cmd_type;
+	uint8_t               ois_fw_flag;
+	uint8_t               is_ois_calib;
+	char                  ois_name[MAX_OIS_NAME_SIZE];
+	struct cam_ois_opcode opcode;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_probe - Contains sensor slave info
+ *
+ * @data_type       :   Slave register data type
+ * @addr_type       :   Slave register address type
+ * @op_code         :   Don't Care
+ * @cmd_type        :   Explains type of command
+ * @reg_addr        :   Slave register address
+ * @expected_data   :   Data expected at slave register address
+ * @data_mask       :   Data mask if only few bits are valid
+ * @camera_id       :   Indicates the slot on which the camera
+ *                      is to be probed
+ * @reserved
+ */
+struct cam_cmd_probe {
+	uint8_t     data_type;
+	uint8_t     addr_type;
+	uint8_t     op_code;
+	uint8_t     cmd_type;
+	uint32_t    reg_addr;
+	uint32_t    expected_data;
+	uint32_t    data_mask;
+	uint16_t    camera_id;
+	uint16_t    reserved;
+} __attribute__((packed));
+
+/**
+ * struct cam_power_settings - Contains sensor power setting info
+ *
+ * @power_seq_type  :   Type of power sequence
+ * @reserved
+ * @config_val_low  :   Lower 32 bits of the configuration value
+ * @config_val_high :   Higher 32 bits of the configuration value
+ *
+ */
+struct cam_power_settings {
+	uint16_t    power_seq_type;
+	uint16_t    reserved;
+	uint32_t    config_val_low;
+	uint32_t    config_val_high;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_power - Explains about the power settings
+ *
+ * @count           :    Number of power settings that follow
+ * @reserved
+ * @cmd_type        :    Explains type of command
+ * @power_settings  :    Contains power setting info
+ */
+struct cam_cmd_power {
+	uint32_t                    count;
+	uint8_t                     reserved;
+	uint8_t                     cmd_type;
+	uint16_t                    more_reserved;
+	struct cam_power_settings   power_settings[1];
+} __attribute__((packed));
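+
+/*
+ * Illustrative sizing (not mandated by this header): a power command
+ * carrying N settings consists of the fixed fields above followed by
+ * N struct cam_power_settings entries, so a user-space encoder could
+ * size it as:
+ *
+ *	size_t len = sizeof(struct cam_cmd_power) +
+ *		     (n - 1) * sizeof(struct cam_power_settings);
+ *	struct cam_cmd_power *cmd = calloc(1, len);
+ *
+ *	cmd->count = n;
+ *
+ * The same trailing-array convention is used by the I2C write commands
+ * below (random_wr_payload, data_read).
+ */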
+
+/**
+ * struct i2c_rdwr_header - header of READ/WRITE I2C command
+ *
+ * @count            :   Number of registers / data / reg-data pairs
+ * @op_code          :   Operation code
+ * @cmd_type         :   Command buffer type
+ * @data_type        :   I2C data type
+ * @addr_type        :   I2C address type
+ */
+struct i2c_rdwr_header {
+	uint32_t    count;
+	uint8_t     op_code;
+	uint8_t     cmd_type;
+	uint8_t     data_type;
+	uint8_t     addr_type;
+} __attribute__((packed));
+
+/**
+ * struct i2c_random_wr_payload - payload for I2C random write
+ *
+ * @ reg_addr        :   Register address
+ * @ reg_data        :   Register data
+ *
+ */
+struct i2c_random_wr_payload {
+	uint32_t     reg_addr;
+	uint32_t     reg_data;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_i2c_random_wr - I2C random write command
+ * @ header            :   header of READ/WRITE I2C command
+ * @ random_wr_payload :   payload for I2C random write
+ */
+struct cam_cmd_i2c_random_wr {
+	struct i2c_rdwr_header       header;
+	struct i2c_random_wr_payload random_wr_payload[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_read - I2C read command
+ * @ reg_data        :   Register data
+ * @ reserved
+ */
+struct cam_cmd_read {
+	uint32_t                reg_data;
+	uint32_t                reserved;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_i2c_continuous_wr - I2C continuous write command
+ * @ header          :   header of READ/WRITE I2C command
+ * @ reg_addr        :   Register address
+ * @ data_read       :   I2C read command
+ */
+struct cam_cmd_i2c_continuous_wr {
+	struct i2c_rdwr_header  header;
+	uint32_t                reg_addr;
+	struct cam_cmd_read     data_read[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_i2c_random_rd - I2C random read command
+ * @ header          :   header of READ/WRITE I2C command
+ * @ data_read       :   I2C read command
+ */
+struct cam_cmd_i2c_random_rd {
+	struct i2c_rdwr_header  header;
+	struct cam_cmd_read     data_read[1];
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_i2c_continuous_rd - I2C continuous read command
+ * @ header          :   header of READ/WRITE I2C command
+ * @ reg_addr        :   Register address
+ *
+ */
+struct cam_cmd_i2c_continuous_rd {
+	struct i2c_rdwr_header  header;
+	uint32_t                reg_addr;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_conditional_wait - Conditional wait command
+ * @data_type       :   Data type
+ * @addr_type       :   Address type
+ * @op_code         :   Opcode
+ * @cmd_type        :   Explains type of command
+ * @timeout         :   Timeout for retries
+ * @reserved
+ * @reg_addr        :   Register Address
+ * @reg_data        :   Register data
+ * @data_mask       :   Data mask if only few bits are valid
+ * @camera_id       :   Indicates the slot on which the camera
+ *                      is to be probed
+ *
+ */
+struct cam_cmd_conditional_wait {
+	uint8_t     data_type;
+	uint8_t     addr_type;
+	uint8_t     op_code;
+	uint8_t     cmd_type;
+	uint16_t    timeout;
+	uint16_t    reserved;
+	uint32_t    reg_addr;
+	uint32_t    reg_data;
+	uint32_t    data_mask;
+} __attribute__((packed));
+
+/**
+ * struct cam_cmd_unconditional_wait - Un-conditional wait command
+ * @delay           :   Delay
+ * @op_code         :   Opcode
+ * @cmd_type        :   Explains type of command
+ */
+struct cam_cmd_unconditional_wait {
+	int16_t     delay;
+	int16_t     reserved;
+	uint8_t     op_code;
+	uint8_t     cmd_type;
+	uint16_t    reserved1;
+} __attribute__((packed));
+
+/**
+ * cam_csiphy_info: Provides command buffer structure
+ * @lane_mask     : Lane mask details
+ * @lane_assign   : Lanes the sensor will be using
+ * @csiphy_3phase : Details whether 3Phase / 2Phase operation
+ * @combo_mode    : Indicates whether combo mode is enabled
+ * @lane_cnt      : Total number of lanes
+ * @secure_mode   : Flag to enable / disable secure mode
+ * @settle_time   : Settling time in ms
+ * @data_rate     : Data rate
+ *
+ */
+struct cam_csiphy_info {
+	uint16_t    lane_mask;
+	uint16_t    lane_assign;
+	uint8_t     csiphy_3phase;
+	uint8_t     combo_mode;
+	uint8_t     lane_cnt;
+	uint8_t     secure_mode;
+	uint64_t    settle_time;
+	uint64_t    data_rate;
+} __attribute__((packed));
+
+/**
+ * cam_csiphy_acquire_dev_info : Information needed for
+ *                        csiphy at the time of acquire
+ * @combo_mode     :    Indicates the device mode of operation
+ * @reserved
+ *
+ */
+struct cam_csiphy_acquire_dev_info {
+	uint32_t    combo_mode;
+	uint32_t    reserved;
+} __attribute__((packed));
+
+/**
+ * cam_sensor_acquire_dev : Sensor acquire command
+ * @session_handle :    Session handle for acquiring device
+ * @device_handle  :    Device handle updated on acquire
+ * @handle_type    :    Resource handle type
+ * @reserved
+ * @info_handle    :    Handle to additional info
+ *                      needed for sensor sub modules
+ *
+ */
+struct cam_sensor_acquire_dev {
+	uint32_t    session_handle;
+	uint32_t    device_handle;
+	uint32_t    handle_type;
+	uint32_t    reserved;
+	uint64_t    info_handle;
+} __attribute__((packed));
+
+/**
+ * cam_sensor_streamon_dev : StreamOn command for the sensor
+ * @session_handle :    Session handle for acquiring device
+ * @device_handle  :    Device handle
+ * @handle_type    :    Resource handle type
+ * @reserved
+ * @info_handle    :    Information Needed at the time of streamOn
+ *
+ */
+struct cam_sensor_streamon_dev {
+	uint32_t    session_handle;
+	uint32_t    device_handle;
+	uint32_t    handle_type;
+	uint32_t    reserved;
+	uint64_t    info_handle;
+} __attribute__((packed));
+
+/**
+ * struct cam_flash_init : Init command for the flash
+ * @flash_type  :    flash hw type
+ * @reserved
+ * @cmd_type    :    command buffer type
+ */
+struct cam_flash_init {
+	uint8_t     flash_type;
+	uint16_t    reserved;
+	uint8_t     cmd_type;
+} __attribute__((packed));
+
+/**
+ * struct cam_flash_set_rer : RedEyeReduction command buffer
+ *
+ * @count             :   Number of flash LEDs
+ * @opcode            :   Command buffer opcode
+ *			CAM_FLASH_FIRE_RER
+ * @cmd_type          :   command buffer operation type
+ * @num_iteration     :   Number of LED turn on/off sequences
+ * @reserved
+ * @led_on_delay_ms   :   flash LED turn on time in ms
+ * @led_off_delay_ms  :   flash LED turn off time in ms
+ * @led_current_ma    :   flash LED current in mA
+ *
+ */
+struct cam_flash_set_rer {
+	uint32_t    count;
+	uint8_t     opcode;
+	uint8_t     cmd_type;
+	uint16_t    num_iteration;
+	uint32_t    led_on_delay_ms;
+	uint32_t    led_off_delay_ms;
+	uint32_t    led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
+} __attribute__((packed));
+
+/**
+ * struct cam_flash_set_on_off : led turn on/off command buffer
+ *
+ * @count              :   Number of flash LEDs
+ * @opcode             :   command buffer opcodes
+ *			CAM_FLASH_FIRE_LOW
+ *			CAM_FLASH_FIRE_HIGH
+ *			CAM_FLASH_OFF
+ * @cmd_type           :   command buffer operation type
+ * @led_current_ma     :   flash LED current in mA
+ *
+ */
+struct cam_flash_set_on_off {
+	uint32_t    count;
+	uint8_t     opcode;
+	uint8_t     cmd_type;
+	uint16_t    reserved;
+	uint32_t    led_current_ma[CAM_FLASH_MAX_LED_TRIGGERS];
+} __attribute__((packed));
+
+/**
+ * struct cam_flash_query_curr : query current command buffer
+ *
+ * @reserved
+ * @opcode            :   command buffer opcode
+ * @cmd_type          :   command buffer operation type
+ * @query_current_ma  :   battery current in mA
+ *
+ */
+struct cam_flash_query_curr {
+	uint16_t    reserved;
+	uint8_t     opcode;
+	uint8_t     cmd_type;
+	uint32_t    query_current_ma;
+} __attribute__ ((packed));
+
+/**
+ * struct cam_flash_query_cap_info : capabilities info for flash
+ *
+ * @slot_info           :  Indicates the slot ID or cell index
+ * @max_current_flash   :  max supported current for flash
+ * @max_duration_flash  :  max flash turn on duration
+ * @max_current_torch   :  max supported current for torch
+ *
+ */
+struct cam_flash_query_cap_info {
+	uint32_t    slot_info;
+	uint32_t    max_current_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t    max_duration_flash[CAM_FLASH_MAX_LED_TRIGGERS];
+	uint32_t    max_current_torch[CAM_FLASH_MAX_LED_TRIGGERS];
+} __attribute__ ((packed));
+
+#endif /* __UAPI_CAM_SENSOR_H__ */
diff --git a/include/uapi/media/cam_sync.h b/include/uapi/media/cam_sync.h
new file mode 100644
index 0000000..924822d
--- /dev/null
+++ b/include/uapi/media/cam_sync.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __UAPI_CAM_SYNC_H__
+#define __UAPI_CAM_SYNC_H__
+
+#include <linux/videodev2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/media.h>
+
+#define CAM_SYNC_DEVICE_NAME                     "cam_sync_device"
+
+/* V4L event which user space will subscribe to */
+#define CAM_SYNC_V4L_EVENT                       (V4L2_EVENT_PRIVATE_START + 0)
+
+/* Specific event ids to get notified in user space */
+#define CAM_SYNC_V4L_EVENT_ID_CB_TRIG            0
+
+/* Size of opaque payload sent to kernel for safekeeping until signal time */
+#define CAM_SYNC_USER_PAYLOAD_SIZE               2
+
+/* Device type for sync device needed for device discovery */
+#define CAM_SYNC_DEVICE_TYPE                     (MEDIA_ENT_F_OLD_BASE)
+
+#define CAM_SYNC_GET_PAYLOAD_PTR(ev, type)       \
+	(type *)((char *)ev.u.data + sizeof(struct cam_sync_ev_header))
+
+#define CAM_SYNC_GET_HEADER_PTR(ev)              \
+	((struct cam_sync_ev_header *)ev.u.data)
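+
+/*
+ * Illustrative use of the accessors above (assumes a descriptor "fd"
+ * open on the cam_sync device and a prior subscription to
+ * CAM_SYNC_V4L_EVENT; "fd" and the payload type are placeholders):
+ *
+ *	struct v4l2_event ev;
+ *	struct cam_sync_ev_header *hdr;
+ *	uint64_t *payload;
+ *
+ *	if (!ioctl(fd, VIDIOC_DQEVENT, &ev)) {
+ *		hdr = CAM_SYNC_GET_HEADER_PTR(ev);
+ *		payload = CAM_SYNC_GET_PAYLOAD_PTR(ev, uint64_t);
+ *	}
+ */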
+
+#define CAM_SYNC_STATE_INVALID                   0
+#define CAM_SYNC_STATE_ACTIVE                    1
+#define CAM_SYNC_STATE_SIGNALED_SUCCESS          2
+#define CAM_SYNC_STATE_SIGNALED_ERROR            3
+
+/**
+ * struct cam_sync_ev_header - Event header for sync event notification
+ *
+ * @sync_obj: Sync object
+ * @status:   Status of the object
+ */
+struct cam_sync_ev_header {
+	int32_t sync_obj;
+	int32_t status;
+};
+
+/**
+ * struct cam_sync_info - Sync object creation information
+ *
+ * @name:       Optional string representation of the sync object
+ * @sync_obj:   Sync object returned after creation in kernel
+ */
+struct cam_sync_info {
+	char name[64];
+	int32_t sync_obj;
+};
+
+/**
+ * struct cam_sync_signal - Sync object signaling struct
+ *
+ * @sync_obj:   Sync object to be signaled
+ * @sync_state: State of the sync object to which it should be signaled
+ */
+struct cam_sync_signal {
+	int32_t sync_obj;
+	uint32_t sync_state;
+};
+
+/**
+ * struct cam_sync_merge - Merge information for sync objects
+ *
+ * @sync_objs:  Pointer to sync objects
+ * @num_objs:   Number of objects in the array
+ * @merged:     Merged sync object
+ */
+struct cam_sync_merge {
+	__u64 sync_objs;
+	uint32_t num_objs;
+	int32_t merged;
+};
+
+/**
+ * struct cam_sync_userpayload_info - Payload info from user space
+ *
+ * @sync_obj:   Sync object for which the payload has to be registered
+ * @reserved:   Reserved
+ * @payload:    Pointer to user payload
+ */
+struct cam_sync_userpayload_info {
+	int32_t sync_obj;
+	uint32_t reserved;
+	__u64 payload[CAM_SYNC_USER_PAYLOAD_SIZE];
+};
+
+/**
+ * struct cam_sync_wait - Sync object wait information
+ *
+ * @sync_obj:   Sync object to wait on
+ * @reserved:   Reserved
+ * @timeout_ms: Timeout in milliseconds
+ */
+struct cam_sync_wait {
+	int32_t sync_obj;
+	uint32_t reserved;
+	uint64_t timeout_ms;
+};
+
+/**
+ * struct cam_private_ioctl_arg - Sync driver ioctl argument
+ *
+ * @id:         IOCTL command id
+ * @size:       Size of command payload
+ * @result:     Result of command execution
+ * @reserved:   Reserved
+ * @ioctl_ptr:  Pointer to user data
+ */
+struct cam_private_ioctl_arg {
+	__u32 id;
+	__u32 size;
+	__u32 result;
+	__u32 reserved;
+	__u64 ioctl_ptr;
+};
+
+#define CAM_PRIVATE_IOCTL_CMD \
+	_IOWR('V', BASE_VIDIOC_PRIVATE, struct cam_private_ioctl_arg)
+
+#define CAM_SYNC_CREATE                          0
+#define CAM_SYNC_DESTROY                         1
+#define CAM_SYNC_SIGNAL                          2
+#define CAM_SYNC_MERGE                           3
+#define CAM_SYNC_REGISTER_PAYLOAD                4
+#define CAM_SYNC_DEREGISTER_PAYLOAD              5
+#define CAM_SYNC_WAIT                            6
+
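+/*
+ * Illustrative user-space invocation (assumes a descriptor "fd" open
+ * on the cam_sync device; "fd" and use() are placeholders, only
+ * CAM_PRIVATE_IOCTL_CMD and the structures are defined here):
+ *
+ *	struct cam_sync_info info = { .name = "example" };
+ *	struct cam_private_ioctl_arg arg = {
+ *		.id = CAM_SYNC_CREATE,
+ *		.size = sizeof(info),
+ *		.ioctl_ptr = (__u64)(uintptr_t)&info,
+ *	};
+ *
+ *	if (!ioctl(fd, CAM_PRIVATE_IOCTL_CMD, &arg) && !arg.result)
+ *		use(info.sync_obj);
+ */
+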
+#endif /* __UAPI_CAM_SYNC_H__ */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a4a4fc1..91c716a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9942,6 +9942,7 @@
 	if (!group_leader)
 		group_leader = event;
 
+	mutex_init(&event->group_leader_mutex);
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);
 
@@ -10472,6 +10473,16 @@
 			group_leader = NULL;
 	}
 
+	/*
+	 * Take the group_leader's group_leader_mutex before observing
+	 * anything in the group leader that could influence our choice
+	 * of ctx, since much of that state may be changing concurrently
+	 * on another thread.  In particular, we want to take this lock
+	 * before deciding whether we need to move_group.
+	 */
+	if (group_leader)
+		mutex_lock(&group_leader->group_leader_mutex);
+
 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
 		task = find_lively_task_by_vpid(pid);
 		if (IS_ERR(task)) {
@@ -10773,6 +10784,8 @@
 	if (move_group)
 		perf_event_ctx_unlock(group_leader, gctx);
 	mutex_unlock(&ctx->mutex);
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 
 	if (task) {
 		mutex_unlock(&task->signal->cred_guard_mutex);
@@ -10816,6 +10829,8 @@
 	if (task)
 		put_task_struct(task);
 err_group_fd:
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 	fdput(group);
 err_fd:
 	put_unused_fd(event_fd);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8c7635e..c23e0b0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1673,6 +1673,22 @@
 		.extra2		= (void *)&mmap_rnd_compat_bits_max,
 	},
 #endif
+#ifdef CONFIG_SWAP
+	{
+		.procname	= "swap_ratio",
+		.data		= &sysctl_swap_ratio,
+		.maxlen		= sizeof(sysctl_swap_ratio),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+	},
+	{
+		.procname	= "swap_ratio_enable",
+		.data		= &sysctl_swap_ratio_enable,
+		.maxlen		= sizeof(sysctl_swap_ratio_enable),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+	},
+#endif
 	{ }
 };
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 5b33e2f..85eb868 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1387,3 +1387,8 @@
 	tick_nohz_switch_to_nohz();
 	return 0;
 }
+
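+/*
+ * Return a pointer to the next expiry time programmed into @cpu's tick
+ * device.  The value may change as soon as it is read; no locking is
+ * provided here, so callers must tolerate stale results.
+ */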
+ktime_t *get_next_event_cpu(unsigned int cpu)
+{
+	return &(per_cpu(tick_cpu_device, cpu).evtdev->next_event);
+}
diff --git a/mm/Makefile b/mm/Makefile
index 7332f89..8aa143d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -54,7 +54,7 @@
 endif
 obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
 
-obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o swap_slots.o
+obj-$(CONFIG_SWAP)	+= page_io.o swap_state.o swapfile.o swap_slots.o swap_ratio.o
 obj-$(CONFIG_FRONTSWAP)	+= frontswap.o
 obj-$(CONFIG_ZSWAP)	+= zswap.o
 obj-$(CONFIG_HAS_DMA)	+= dmapool.o
diff --git a/mm/swap_ratio.c b/mm/swap_ratio.c
new file mode 100644
index 0000000..577b8c1
--- /dev/null
+++ b/mm/swap_ratio.c
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/mm_types.h>
+#include <linux/swapfile.h>
+#include <linux/swap.h>
+
+#define SWAP_RATIO_GROUP_START (SWAP_FLAG_PRIO_MASK - 9) /* 32758 */
+#define SWAP_RATIO_GROUP_END (SWAP_FLAG_PRIO_MASK) /* 32767 */
+#define SWAP_FAST_WRITES (SWAPFILE_CLUSTER * (SWAP_CLUSTER_MAX / 8))
+#define SWAP_SLOW_WRITES SWAPFILE_CLUSTER
+
+/*
+ * The fast/slow swap write ratio, expressed as the
+ * percentage of writes that should go to the fast
+ * swap device; 100 sends all writes to the fast
+ * swap device.
+ */
+int sysctl_swap_ratio = 100;
+
+/* Enable the swap ratio feature */
+int sysctl_swap_ratio_enable;
+
+static bool is_same_group(struct swap_info_struct *a,
+		struct swap_info_struct *b)
+{
+	if (!sysctl_swap_ratio_enable)
+		return false;
+
+	if (!is_swap_ratio_group(a->prio))
+		return false;
+
+	if (a->prio == b->prio)
+		return true;
+
+	return false;
+}
+
+/* Caller must hold swap_avail_lock */
+static int calculate_write_pending(struct swap_info_struct *si,
+			struct swap_info_struct *n)
+{
+	int ratio = sysctl_swap_ratio;
+
+	if ((ratio < 0) || (ratio > 100))
+		return -EINVAL;
+
+	if (WARN_ON(!(si->flags & SWP_SYNCHRONOUS_IO)))
+		return -ENODEV;
+
+	if ((n->flags & SWP_SYNCHRONOUS_IO) || !is_same_group(si, n))
+		return -ENODEV;
+
+	si->max_writes = ratio ? SWAP_FAST_WRITES : 0;
+	n->max_writes  = ratio ? (SWAP_FAST_WRITES * 100) /
+			ratio - SWAP_FAST_WRITES : SWAP_SLOW_WRITES;
+
+	si->write_pending = si->max_writes;
+	n->write_pending = n->max_writes;
+
+	return 0;
+}
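+
+/*
+ * Worked example (illustrative): with sysctl_swap_ratio = 80, the fast
+ * device is granted SWAP_FAST_WRITES writes per cycle and the slow
+ * device (SWAP_FAST_WRITES * 100) / 80 - SWAP_FAST_WRITES =
+ * SWAP_FAST_WRITES / 4 writes, i.e. 80% of the writes in each cycle
+ * land on the fast device.  A ratio of 100 leaves the slow device
+ * with no writes at all.
+ */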
+
+static int swap_ratio_slow(struct swap_info_struct **si, int node)
+{
+	struct swap_info_struct *n = NULL;
+	int ret = 0;
+
+	spin_lock(&(*si)->lock);
+	spin_lock(&swap_avail_lock);
+	if (&(*si)->avail_lists[node] == plist_last(&swap_avail_heads[node])) {
+		/* just to make skip work */
+		n = *si;
+		ret = -ENODEV;
+		goto skip;
+	}
+	n = plist_next_entry(&(*si)->avail_lists[node],
+			struct swap_info_struct,
+			avail_lists[node]);
+	spin_unlock(&swap_avail_lock);
+	spin_lock(&n->lock);
+	spin_lock(&swap_avail_lock);
+
+	if ((*si)->flags & SWP_SYNCHRONOUS_IO) {
+		if ((*si)->write_pending) {
+			(*si)->write_pending--;
+			goto exit;
+		} else {
+			if ((n->flags & SWP_SYNCHRONOUS_IO) ||
+			     !is_same_group(*si, n)) {
+				/* Should never happen */
+				ret = -ENODEV;
+			} else if (n->write_pending) {
+				/*
+				 * Requeue fast device, since there are pending
+				 * writes for slow device.
+				 */
+				plist_requeue(&(*si)->avail_lists[node],
+					&swap_avail_heads[node]);
+				n->write_pending--;
+				spin_unlock(&(*si)->lock);
+				*si = n;
+				goto skip;
+			} else {
+				if (calculate_write_pending(*si, n) < 0) {
+					ret = -ENODEV;
+					goto exit;
+				}
+				/* Restart from fast device */
+				(*si)->write_pending--;
+			}
+		}
+	} else {
+		if (!(n->flags & SWP_SYNCHRONOUS_IO) ||
+		      !is_same_group(*si, n)) {
+			/* Should never happen */
+			ret = -ENODEV;
+		} else if (n->write_pending) {
+			/*
+			 * Pending writes for fast device.
+			 * We reach here when slow device is swapped on first,
+			 * before fast device.
+			 */
+			/* requeue slow device to the end */
+			plist_requeue(&(*si)->avail_lists[node],
+					&swap_avail_heads[node]);
+			n->write_pending--;
+			spin_unlock(&(*si)->lock);
+			*si = n;
+			goto skip;
+		} else {
+			if ((*si)->write_pending) {
+				(*si)->write_pending--;
+			} else {
+				if (calculate_write_pending(n, *si) < 0) {
+					ret = -ENODEV;
+					goto exit;
+				}
+				n->write_pending--;
+				plist_requeue(&(*si)->avail_lists[node],
+					&swap_avail_heads[node]);
+				spin_unlock(&(*si)->lock);
+				*si = n;
+				goto skip;
+			}
+		}
+	}
+exit:
+	spin_unlock(&(*si)->lock);
+skip:
+	spin_unlock(&swap_avail_lock);
+	/* n and si may have been interchanged above */
+	spin_unlock(&n->lock);
+	return ret;
+}
+
+bool is_swap_ratio_group(int prio)
+{
+	return (prio >= SWAP_RATIO_GROUP_START) &&
+		(prio <= SWAP_RATIO_GROUP_END);
+}
+
+void setup_swap_ratio(struct swap_info_struct *p, int prio)
+{
+	/* Used only if sysctl_swap_ratio_enable is set */
+	if (is_swap_ratio_group(prio)) {
+		if (p->flags & SWP_SYNCHRONOUS_IO)
+			p->write_pending = SWAP_FAST_WRITES;
+		else
+			p->write_pending = SWAP_SLOW_WRITES;
+		p->max_writes =  p->write_pending;
+	}
+}
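+
+/*
+ * Illustrative activation (from user space): a device joins the ratio
+ * group when swapon(2) is invoked with SWAP_FLAG_PREFER and a priority
+ * inside [SWAP_RATIO_GROUP_START, SWAP_RATIO_GROUP_END]; the path
+ * below is a placeholder:
+ *
+ *	int flags = SWAP_FLAG_PREFER |
+ *		    ((32767 << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);
+ *	swapon("/dev/fast_swap", flags);
+ */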
+
+int swap_ratio(struct swap_info_struct **si, int node)
+{
+	if (is_swap_ratio_group((*si)->prio))
+		return swap_ratio_slow(si, node);
+	else
+		return -ENODEV;
+}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d954b71..b9ad9c3 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -85,8 +85,8 @@
  * is held and the locking order requires swap_lock to be taken
  * before any swap_info_struct->lock.
  */
-static struct plist_head *swap_avail_heads;
-static DEFINE_SPINLOCK(swap_avail_lock);
+struct plist_head *swap_avail_heads;
+DEFINE_SPINLOCK(swap_avail_lock);
 
 struct swap_info_struct *swap_info[MAX_SWAPFILES];
 
@@ -947,6 +947,7 @@
 	long avail_pgs;
 	int n_ret = 0;
 	int node;
+	int swap_ratio_off = 0;
 
 	/* Only single cluster request supported */
 	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);
@@ -963,14 +964,34 @@
 
 	atomic_long_sub(n_goal * size, &nr_swap_pages);
 
+lock_and_start:
 	spin_lock(&swap_avail_lock);
 
 start_over:
 	node = numa_node_id();
 	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
+
+		if (sysctl_swap_ratio && !swap_ratio_off) {
+			int ret;
+
+			spin_unlock(&swap_avail_lock);
+			ret = swap_ratio(&si, node);
+			if (ret < 0) {
+				/*
+				 * Error. Start again with swap
+				 * ratio disabled.
+				 */
+				swap_ratio_off = 1;
+				goto lock_and_start;
+			} else {
+				goto start;
+			}
+		}
+
 		/* requeue si to after same-priority siblings */
 		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
 		spin_unlock(&swap_avail_lock);
+start:
 		spin_lock(&si->lock);
 		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
 			spin_lock(&swap_avail_lock);
@@ -3270,9 +3291,11 @@
 
 	mutex_lock(&swapon_mutex);
 	prio = -1;
-	if (swap_flags & SWAP_FLAG_PREFER)
+	if (swap_flags & SWAP_FLAG_PREFER) {
 		prio =
 		  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
+		setup_swap_ratio(p, prio);
+	}
 	enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
 
 	pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
diff --git a/net/core/dev.c b/net/core/dev.c
index 93243479..570a7c4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -145,6 +145,8 @@
 #include <linux/sctp.h>
 #include <net/udp_tunnel.h>
 #include <linux/net_namespace.h>
+#include <linux/tcp.h>
+#include <net/tcp.h>
 
 #include "net-sysfs.h"
 
@@ -3305,6 +3307,10 @@
 	if (netif_needs_gso(skb, features)) {
 		struct sk_buff *segs;
 
+		__be16 src_port = tcp_hdr(skb)->source;
+		__be16 dest_port = tcp_hdr(skb)->dest;
+
+		trace_print_skb_gso(skb, src_port, dest_port);
 		segs = skb_gso_segment(skb, features);
 		if (IS_ERR(segs)) {
 			goto out_kfree_skb;
@@ -5302,6 +5308,7 @@
 	}
 
 out:
+	/* Count skbs that GRO coalesced from more than one segment */
+	__this_cpu_add(softnet_data.gro_coalesced, NAPI_GRO_CB(skb)->count > 1);
 	return netif_receive_skb_internal(skb);
 }
 
@@ -5352,6 +5359,7 @@
 		unsigned long diffs;
 
 		NAPI_GRO_CB(p)->flush = 0;
+		NAPI_GRO_CB(p)->flush_id = 0;
 
 		if (hash != skb_get_hash_raw(p)) {
 			NAPI_GRO_CB(p)->same_flow = 0;
@@ -5464,7 +5472,6 @@
 		NAPI_GRO_CB(skb)->encap_mark = 0;
 		NAPI_GRO_CB(skb)->recursion_counter = 0;
 		NAPI_GRO_CB(skb)->is_fou = 0;
-		NAPI_GRO_CB(skb)->is_atomic = 1;
 		NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
 		/* Setup for GRO checksum validation */
@@ -5769,8 +5776,14 @@
 	while (remsd) {
 		struct softnet_data *next = remsd->rps_ipi_next;
 
-		if (cpu_online(remsd->cpu))
+		if (cpu_online(remsd->cpu)) {
 			smp_call_function_single_async(remsd->cpu, &remsd->csd);
+		} else {
+			pr_err("%s() cpu offline\n", __func__);
+			rps_lock(remsd);
+			remsd->backlog.state = 0;
+			rps_unlock(remsd);
+		}
 		remsd = next;
 	}
 #endif
@@ -5830,8 +5843,7 @@
 			rcu_read_unlock();
 			input_queue_head_incr(sd);
 			if (++work >= quota)
-				return work;
-
+				goto state_changed;
 		}
 
 		local_irq_disable();
@@ -5855,6 +5867,10 @@
 		local_irq_enable();
 	}
 
+state_changed:
+	/* Flush pending GRO packets and release the current NAPI context */
+	napi_gro_flush(napi, false);
+	sd->current_napi = NULL;
+
 	return work;
 }
 
@@ -5947,9 +5963,12 @@
 			napi_gro_flush(n, false);
 	}
 	if (unlikely(!list_empty(&n->poll_list))) {
+		struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
 		/* If n->poll_list is not empty, we need to mask irqs */
 		local_irq_save(flags);
 		list_del_init(&n->poll_list);
+		sd->current_napi = NULL;
 		local_irq_restore(flags);
 	}
 
@@ -6227,6 +6246,14 @@
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+/* Return the NAPI instance currently being polled on this CPU, if any */
+struct napi_struct *get_current_napi_context(void)
+{
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+	return sd->current_napi;
+}
+EXPORT_SYMBOL(get_current_napi_context);
+
 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 {
 	void *have;
@@ -6246,6 +6273,9 @@
 	 */
 	work = 0;
 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+		struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+		sd->current_napi = n;
 		work = n->poll(n, weight);
 		trace_napi_poll(n, work, weight);
 	}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 63881f7..ff868ae 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -159,12 +159,12 @@
 	rcu_read_unlock();
 #endif
 
-	seq_printf(seq,
-		   "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
-		   sd->processed, sd->dropped, sd->time_squeeze, 0,
-		   0, 0, 0, 0, /* was fastroute */
-		   0,	/* was cpu_collision */
-		   sd->received_rps, flow_limit_count);
+	seq_printf
+	(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+	 sd->processed, sd->dropped, sd->time_squeeze, 0,
+	 0, 0, 0, 0, /* was fastroute */
+	 0, /* was cpu_collision */
+	 sd->received_rps, flow_limit_count, sd->gro_coalesced);
 	return 0;
 }
 
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 66c70a1..f6f6dfa 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -138,6 +138,8 @@
 }
 #endif
 
+/*
+ * When 0, explicit bind() requests for ports listed in
+ * ip_local_reserved_ports are rejected; the default of 1 keeps such
+ * binds allowed.
+ */
+int sysctl_reserved_port_bind __read_mostly = 1;
+
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
  */
@@ -1448,7 +1450,6 @@
 
 	list_for_each_entry(p, head, list) {
 		struct iphdr *iph2;
-		u16 flush_id;
 
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -1474,34 +1475,23 @@
 
 		NAPI_GRO_CB(p)->flush |= flush;
 
-		/* We need to store of the IP ID check to be included later
-		 * when we can verify that this packet does in fact belong
-		 * to a given flow.
+		/* For non-atomic datagrams we need to save the IP ID offset
+		 * to be included later.  If the frame has the DF bit set
+		 * we must ignore the IP ID value as per RFC 6864.
 		 */
-		flush_id = (u16)(id - ntohs(iph2->id));
+		if (iph2->frag_off & htons(IP_DF))
+			continue;
 
-		/* This bit of code makes it much easier for us to identify
-		 * the cases where we are doing atomic vs non-atomic IP ID
-		 * checks.  Specifically an atomic check can return IP ID
-		 * values 0 - 0xFFFF, while a non-atomic check can only
-		 * return 0 or 0xFFFF.
+		/* We must save the offset as it is possible to have multiple
+		 * flows using the same protocol and address pairs so we
+		 * need to wait until we can validate this is part of the
+		 * same flow with a 5-tuple or better to avoid unnecessary
+		 * collisions between flows.
 		 */
-		if (!NAPI_GRO_CB(p)->is_atomic ||
-		    !(iph->frag_off & htons(IP_DF))) {
-			flush_id ^= NAPI_GRO_CB(p)->count;
-			flush_id = flush_id ? 0xFFFF : 0;
-		}
-
-		/* If the previous IP ID value was based on an atomic
-		 * datagram we can overwrite the value and ignore it.
-		 */
-		if (NAPI_GRO_CB(skb)->is_atomic)
-			NAPI_GRO_CB(p)->flush_id = flush_id;
-		else
-			NAPI_GRO_CB(p)->flush_id |= flush_id;
+		NAPI_GRO_CB(p)->flush_id |= ntohs(iph2->id) ^
+					    (u16)(id - NAPI_GRO_CB(p)->count);
 	}
 
-	NAPI_GRO_CB(skb)->is_atomic = !!(iph->frag_off & htons(IP_DF));
 	NAPI_GRO_CB(skb)->flush |= flush;
 	skb_set_network_header(skb, off);
 	/* The above will be needed by the transport layer if there is one
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 15e7f79..6a56ec8 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -305,6 +305,13 @@
 	head = &hinfo->bhash[inet_bhashfn(net, port,
 					  hinfo->bhash_size)];
 	spin_lock_bh(&head->lock);
+
+	if (inet_is_local_reserved_port(net, snum) &&
+	    !sysctl_reserved_port_bind) {
+		ret = 1;
+		goto fail_unlock;
+	}
+
 	inet_bind_bucket_for_each(tb, &head->chain)
 		if (net_eq(ib_net(tb), net) && tb->port == port)
 			goto tb_found;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 314ea31..8ee8ad0 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -683,6 +683,13 @@
 		.proc_handler	= proc_do_large_bitmap,
 	},
 	{
+		.procname       = "reserved_port_bind",
+		.data           = &sysctl_reserved_port_bind,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec
+	},
+	{
 		.procname	= "ip_no_pmtu_disc",
 		.data		= &init_net.ipv4.sysctl_ip_no_pmtu_disc,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0c65fc9..3147931 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2309,6 +2309,7 @@
 	__be32 src = inet->inet_rcv_saddr;
 	__u16 destp = ntohs(inet->inet_dport);
 	__u16 srcp = ntohs(inet->inet_sport);
+	__u8 seq_state = sk->sk_state;
 	int rx_queue;
 	int state;
 
@@ -2328,6 +2329,9 @@
 		timer_expires = jiffies;
 	}
 
+	/* Expose IP_TRANSPARENT in the high bit of the reported state */
+	if (inet->transparent)
+		seq_state |= 0x80;
+
 	state = inet_sk_state_load(sk);
 	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
@@ -2339,7 +2343,7 @@
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
-		i, src, srcp, dest, destp, state,
+		i, src, srcp, dest, destp, seq_state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
 		timer_active,
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index 870b0a3..365ee69 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -238,7 +238,7 @@
 
 found:
 	/* Include the IP ID check below from the inner most IP hdr */
-	flush = NAPI_GRO_CB(p)->flush;
+	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
 	flush |= (__force int)(flags & TCP_FLAG_CWR);
 	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
 		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
@@ -247,17 +247,6 @@
 		flush |= *(u32 *)((u8 *)th + i) ^
 			 *(u32 *)((u8 *)th2 + i);
 
-	/* When we receive our second frame we can made a decision on if we
-	 * continue this flow as an atomic flow with a fixed ID or if we use
-	 * an incrementing ID.
-	 */
-	if (NAPI_GRO_CB(p)->flush_id != 1 ||
-	    NAPI_GRO_CB(p)->count != 1 ||
-	    !NAPI_GRO_CB(p)->is_atomic)
-		flush |= NAPI_GRO_CB(p)->flush_id;
-	else
-		NAPI_GRO_CB(p)->is_atomic = false;
-
 	mss = skb_shinfo(p)->gso_size;
 
 	flush |= (len - 1) >= mss;
@@ -327,9 +316,6 @@
 				  iph->daddr, 0);
 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
-	if (NAPI_GRO_CB(skb)->is_atomic)
-		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
-
 	return tcp_gro_complete(skb);
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c32a4c1..090144c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -288,6 +288,11 @@
 	} else {
 		hslot = udp_hashslot(udptable, net, snum);
 		spin_lock_bh(&hslot->lock);
+
+		if (inet_is_local_reserved_port(net, snum) &&
+		    !sysctl_reserved_port_bind)
+			goto fail_unlock;
+
 		if (hslot->count > 10) {
 			int exist;
 			unsigned int slot2 = udp_sk(sk)->udp_portaddr_hash ^ snum;
@@ -2762,14 +2767,21 @@
 		int bucket)
 {
 	struct inet_sock *inet = inet_sk(sp);
+	struct udp_sock *up = udp_sk(sp);
 	__be32 dest = inet->inet_daddr;
 	__be32 src  = inet->inet_rcv_saddr;
 	__u16 destp	  = ntohs(inet->inet_dport);
 	__u16 srcp	  = ntohs(inet->inet_sport);
+	__u8 state = sp->sk_state;
+
+	/* Encode encap mode and IP_TRANSPARENT in the upper state bits */
+	if (up->encap_rcv)
+		state |= 0xF0;
+	else if (inet->transparent)
+		state |= 0x80;
 
 	seq_printf(f, "%5d: %08X:%04X %08X:%04X"
 		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
-		bucket, src, srcp, dest, destp, sp->sk_state,
+		bucket, src, srcp, dest, destp, state,
 		sk_wmem_alloc_get(sp),
 		udp_rqueue_get(sp),
 		0, 0L, 0,
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1ede7a1..f31fe86 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -1026,9 +1026,14 @@
 			       __u16 srcp, __u16 destp, int rqueue, int bucket)
 {
 	const struct in6_addr *dest, *src;
+	__u8 state = sp->sk_state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
+
+	if (inet_sk(sp) && inet_sk(sp)->transparent)
+		state |= 0x80;
+
 	seq_printf(seq,
 		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
@@ -1037,7 +1042,7 @@
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   sp->sk_state,
+		   state,
 		   sk_wmem_alloc_get(sp),
 		   rqueue,
 		   0, 0L, 0,
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index c7e495f..236c96b 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -240,15 +240,8 @@
 		/* flush if Traffic Class fields are different */
 		NAPI_GRO_CB(p)->flush |= !!(first_word & htonl(0x0FF00000));
 		NAPI_GRO_CB(p)->flush |= flush;
-
-		/* If the previous IP ID value was based on an atomic
-		 * datagram we can overwrite the value and ignore it.
-		 */
-		if (NAPI_GRO_CB(skb)->is_atomic)
-			NAPI_GRO_CB(p)->flush_id = 0;
 	}
 
-	NAPI_GRO_CB(skb)->is_atomic = true;
 	NAPI_GRO_CB(skb)->flush |= flush;
 
 	skb_gro_postpull_rcsum(skb, iph, nlen);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 03e6b7a..7a2659d 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1809,6 +1809,7 @@
 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
 	int rx_queue;
 	int state;
+	__u8 state_seq = sp->sk_state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
@@ -1840,6 +1841,9 @@
 		 */
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
+	if (inet->transparent)
+		state_seq |= 0x80;
+
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1848,7 +1852,7 @@
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   state,
+		   state_seq,
 		   tp->write_seq - tp->snd_una,
 		   rx_queue,
 		   timer_active,
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 673860a..d4ec96b 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -76,6 +76,7 @@
 	bool send_nl_msg;
 	bool active;
 	uid_t uid;
+	bool suspend_time_valid;
 };
 
 static LIST_HEAD(idletimer_tg_list);
@@ -245,8 +246,13 @@
 	switch (pm_event) {
 	case PM_SUSPEND_PREPARE:
 		get_monotonic_boottime(&timer->last_suspend_time);
+		timer->suspend_time_valid = true;
 		break;
 	case PM_POST_SUSPEND:
+		if (!timer->suspend_time_valid)
+			break;
+		timer->suspend_time_valid = false;
+
 		spin_lock_bh(&timestamp_lock);
 		if (!timer->active) {
 			spin_unlock_bh(&timestamp_lock);
@@ -281,7 +287,7 @@
 {
 	int ret;
 
-	info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+	info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
 	if (!info->timer) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
index 1944834..20f5ea8 100644
--- a/net/qrtr/Kconfig
+++ b/net/qrtr/Kconfig
@@ -28,4 +28,14 @@
 	  implement endpoints of QRTR, for purpose of tunneling data to other
 	  hosts or testing purposes.
 
+config QRTR_MHI
+	tristate "MHI IPC Router channels"
+	depends on MHI_BUS || (COMPILE_TEST && MHI_BUS=n)
+	help
+	  Say Y here to support MHI-based IPC Router channels.  MHI is the
+	  transport used for external modem connections for IPC Router. The
+	  MHI bus provides an asynchronous send mechanism, so the QRTR MHI
+	  transport waits for the uplink callback notification before
+	  returning to qrtr.
+
 endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index be012bf..f3f7036 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -4,3 +4,5 @@
 qrtr-smd-y	:= smd.o
 obj-$(CONFIG_QRTR_TUN) += qrtr-tun.o
 qrtr-tun-y	:= tun.o
+obj-$(CONFIG_QRTR_MHI) += qrtr-mhi.o
+qrtr-mhi-y	:= mhi.o
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
new file mode 100644
index 0000000..e070278
--- /dev/null
+++ b/net/qrtr/mhi.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mhi.h>
+#include <net/sock.h>
+
+#include "qrtr.h"
+
+struct qrtr_mhi_dev {
+	struct qrtr_endpoint ep;
+	struct mhi_device *mhi_dev;
+	struct device *dev;
+	spinlock_t ul_lock;		/* lock to protect ul_pkts */
+	struct list_head ul_pkts;
+};
+
+struct qrtr_mhi_pkt {
+	struct list_head node;
+	struct sk_buff *skb;
+	struct kref refcount;
+	struct completion done;
+};
+
+static void qrtr_mhi_pkt_release(struct kref *ref)
+{
+	struct qrtr_mhi_pkt *pkt = container_of(ref, struct qrtr_mhi_pkt,
+						refcount);
+	struct sock *sk = pkt->skb->sk;
+
+	consume_skb(pkt->skb);
+	if (sk)
+		sock_put(sk);
+	kfree(pkt);
+}
+
+/* from mhi to qrtr */
+static void qcom_mhi_qrtr_dl_callback(struct mhi_device *mhi_dev,
+				      struct mhi_result *mhi_res)
+{
+	struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+	int rc;
+
+	if (!qdev || mhi_res->transaction_status)
+		return;
+
+	rc = qrtr_endpoint_post(&qdev->ep, mhi_res->buf_addr,
+				mhi_res->bytes_xferd);
+	if (rc == -EINVAL)
+		dev_err(qdev->dev, "invalid ipcrouter packet\n");
+}
+
+/* from qrtr to mhi */
+static void qcom_mhi_qrtr_ul_callback(struct mhi_device *mhi_dev,
+				      struct mhi_result *mhi_res)
+{
+	struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+	struct qrtr_mhi_pkt *pkt;
+
+	spin_lock_bh(&qdev->ul_lock);
+	pkt = list_first_entry(&qdev->ul_pkts, struct qrtr_mhi_pkt, node);
+	list_del(&pkt->node);
+	complete_all(&pkt->done);
+
+	kref_put(&pkt->refcount, qrtr_mhi_pkt_release);
+	spin_unlock_bh(&qdev->ul_lock);
+}
+
+/* from qrtr to mhi */
+static int qcom_mhi_qrtr_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+	struct qrtr_mhi_dev *qdev = container_of(ep, struct qrtr_mhi_dev, ep);
+	struct qrtr_mhi_pkt *pkt;
+	int rc;
+
+	rc = skb_linearize(skb);
+	if (rc) {
+		kfree_skb(skb);
+		return rc;
+	}
+
+	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt) {
+		kfree_skb(skb);
+		return -ENOMEM;
+	}
+
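+	/*
+	 * Two references are held on the packet: one is dropped by the
+	 * uplink completion callback via qrtr_mhi_pkt_release(), the
+	 * other at the end of this function.
+	 */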
+	init_completion(&pkt->done);
+	kref_init(&pkt->refcount);
+	kref_get(&pkt->refcount);
+	pkt->skb = skb;
+
+	spin_lock_bh(&qdev->ul_lock);
+	list_add_tail(&pkt->node, &qdev->ul_pkts);
+	rc = mhi_queue_transfer(qdev->mhi_dev, DMA_TO_DEVICE, skb, skb->len,
+				MHI_EOT);
+	if (rc) {
+		list_del(&pkt->node);
+		kfree_skb(skb);
+		kfree(pkt);
+		spin_unlock_bh(&qdev->ul_lock);
+		return rc;
+	}
+	spin_unlock_bh(&qdev->ul_lock);
+	if (skb->sk)
+		sock_hold(skb->sk);
+
+	rc = wait_for_completion_interruptible_timeout(&pkt->done, HZ * 5);
+	if (rc > 0)
+		rc = 0;
+	else if (rc == 0)
+		rc = -ETIMEDOUT;
+
+	kref_put(&pkt->refcount, qrtr_mhi_pkt_release);
+	return rc;
+}
+
+static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
+			       const struct mhi_device_id *id)
+{
+	struct qrtr_mhi_dev *qdev;
+	int rc;
+
+	qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
+	if (!qdev)
+		return -ENOMEM;
+
+	qdev->mhi_dev = mhi_dev;
+	qdev->dev = &mhi_dev->dev;
+	qdev->ep.xmit = qcom_mhi_qrtr_send;
+
+	INIT_LIST_HEAD(&qdev->ul_pkts);
+	spin_lock_init(&qdev->ul_lock);
+
+	rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+	if (rc)
+		return rc;
+
+	dev_set_drvdata(&mhi_dev->dev, qdev);
+
+	dev_dbg(qdev->dev, "QTI MHI QRTR driver probed\n");
+
+	return 0;
+}
+
+static void qcom_mhi_qrtr_remove(struct mhi_device *mhi_dev)
+{
+	struct qrtr_mhi_dev *qdev = dev_get_drvdata(&mhi_dev->dev);
+
+	qrtr_endpoint_unregister(&qdev->ep);
+	dev_set_drvdata(&mhi_dev->dev, NULL);
+}
+
+static const struct mhi_device_id qcom_mhi_qrtr_mhi_match[] = {
+	{ .chan = "IPCR" },
+	{}
+};
+
+static struct mhi_driver qcom_mhi_qrtr_driver = {
+	.probe = qcom_mhi_qrtr_probe,
+	.remove = qcom_mhi_qrtr_remove,
+	.dl_xfer_cb = qcom_mhi_qrtr_dl_callback,
+	.ul_xfer_cb = qcom_mhi_qrtr_ul_callback,
+	.id_table = qcom_mhi_qrtr_mhi_match,
+	.driver = {
+		.name = "qcom_mhi_qrtr",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_driver(qcom_mhi_qrtr_driver, mhi_driver_register,
+	      mhi_driver_unregister);
+
+MODULE_DESCRIPTION("QTI IPC-Router MHI interface driver");
+MODULE_LICENSE("GPL v2");
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 86e1e37..ef52dd6 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2015, Sony Mobile Communications Inc.
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2018 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,11 +15,19 @@
 #include <linux/netlink.h>
 #include <linux/qrtr.h>
 #include <linux/termios.h>	/* For TIOCINQ/OUTQ */
+#include <linux/wait.h>
+#include <linux/rwsem.h>
+#include <linux/ipc_logging.h>
+#include <linux/uidgid.h>
 
 #include <net/sock.h>
 
 #include "qrtr.h"
 
+#define QRTR_LOG_PAGE_CNT 4
+#define QRTR_INFO(ctx, x, ...)				\
+	ipc_log_string(ctx, x, ##__VA_ARGS__)
+
 #define QRTR_PROTO_VER_1 1
 #define QRTR_PROTO_VER_2 3
 
@@ -27,6 +35,12 @@
 #define QRTR_MIN_EPH_SOCKET 0x4000
 #define QRTR_MAX_EPH_SOCKET 0x7fff
 
+/* qrtr socket states */
+#define QRTR_STATE_MULTI	-2
+#define QRTR_STATE_INIT	-1
+
+#define AID_VENDOR_QRTR	KGIDT_INIT(2906)
+
 /**
  * struct qrtr_hdr_v1 - (I|R)PCrouter packet header version 1
  * @version: protocol version
@@ -93,6 +107,8 @@
 	struct sock sk;
 	struct sockaddr_qrtr us;
 	struct sockaddr_qrtr peer;
+
+	int state;
 };
 
 static inline struct qrtr_sock *qrtr_sk(struct sock *sk)
@@ -101,14 +117,14 @@
 	return container_of(sk, struct qrtr_sock, sk);
 }
 
-static unsigned int qrtr_local_nid = -1;
+static unsigned int qrtr_local_nid = 1;
 
 /* for node ids */
 static RADIX_TREE(qrtr_nodes, GFP_KERNEL);
 /* broadcast list */
-static LIST_HEAD(qrtr_all_nodes);
-/* lock for qrtr_nodes, qrtr_all_nodes and node reference */
-static DEFINE_MUTEX(qrtr_node_lock);
+static LIST_HEAD(qrtr_all_epts);
+/* lock for qrtr_nodes, qrtr_all_epts and node reference */
+static DECLARE_RWSEM(qrtr_node_lock);
 
 /* local port allocation management */
 static DEFINE_IDR(qrtr_ports);
@@ -120,27 +136,163 @@
  * @ep: endpoint
  * @ref: reference count for node
  * @nid: node id
+ * @hello_sent: hello packet sent to endpoint
+ * @qrtr_tx_flow: remote port tx flow control list
+ * @resume_tx: wait until remote port acks control flag
+ * @qrtr_tx_lock: lock for qrtr_tx_flow
  * @rx_queue: receive queue
  * @work: scheduled work struct for recv work
  * @item: list item for broadcast list
+ * @ilc: ipc logging context reference
  */
 struct qrtr_node {
 	struct mutex ep_lock;
 	struct qrtr_endpoint *ep;
 	struct kref ref;
 	unsigned int nid;
+	atomic_t hello_sent;
+
+	struct radix_tree_root qrtr_tx_flow;
+	struct wait_queue_head resume_tx;
+	struct mutex qrtr_tx_lock;	/* for qrtr_tx_flow */
 
 	struct sk_buff_head rx_queue;
 	struct work_struct work;
 	struct list_head item;
+
+	void *ilc;
 };
 
+struct qrtr_tx_flow_waiter {
+	struct list_head node;
+	struct sock *sk;
+};
+
+struct qrtr_tx_flow {
+	atomic_t pending;
+	struct list_head waiters;
+};
+
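+/*
+ * Per-destination flow state lives in node->qrtr_tx_flow, keyed by the
+ * destination address packed into a single value (as computed below):
+ *
+ *	unsigned long key = (u64)to->sq_node << 32 | to->sq_port;
+ */
+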
+#define QRTR_TX_FLOW_HIGH	10
+#define QRTR_TX_FLOW_LOW	5
+
+static struct sk_buff *qrtr_alloc_ctrl_packet(struct qrtr_ctrl_pkt **pkt);
 static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 			      int type, struct sockaddr_qrtr *from,
-			      struct sockaddr_qrtr *to);
+			      struct sockaddr_qrtr *to, unsigned int flags);
 static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 			      int type, struct sockaddr_qrtr *from,
-			      struct sockaddr_qrtr *to);
+			      struct sockaddr_qrtr *to, unsigned int flags);
+
+static void qrtr_log_tx_msg(struct qrtr_node *node, struct qrtr_hdr_v1 *hdr,
+			    struct sk_buff *skb)
+{
+	const struct qrtr_ctrl_pkt *pkt;
+	u64 pl_buf = 0;
+
+	if (!hdr || !skb || !skb->data)
+		return;
+
+	if (hdr->type == QRTR_TYPE_DATA) {
+		pl_buf = *(u64 *)(skb->data + QRTR_HDR_MAX_SIZE);
+		QRTR_INFO(node->ilc,
+			  "TX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x] [%s]\n",
+			  hdr->size, hdr->confirm_rx,
+			  hdr->src_node_id, hdr->src_port_id,
+			  hdr->dst_node_id, hdr->dst_port_id,
+			  (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32),
+			  current->comm);
+	} else {
+		pkt = (struct qrtr_ctrl_pkt *)(skb->data + QRTR_HDR_MAX_SIZE);
+		if (hdr->type == QRTR_TYPE_NEW_SERVER ||
+		    hdr->type == QRTR_TYPE_DEL_SERVER)
+			QRTR_INFO(node->ilc,
+				  "TX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
+				  hdr->type, le32_to_cpu(pkt->server.service),
+				  le32_to_cpu(pkt->server.instance),
+				  le32_to_cpu(pkt->server.node),
+				  le32_to_cpu(pkt->server.port));
+		else if (hdr->type == QRTR_TYPE_DEL_CLIENT ||
+			 hdr->type == QRTR_TYPE_RESUME_TX)
+			QRTR_INFO(node->ilc,
+				  "TX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
+				  hdr->type, le32_to_cpu(pkt->client.node),
+				  le32_to_cpu(pkt->client.port));
+		else if (hdr->type == QRTR_TYPE_HELLO ||
+			 hdr->type == QRTR_TYPE_BYE)
+			QRTR_INFO(node->ilc,
+				  "TX CTRL: cmd:0x%x node[0x%x]\n",
+				  hdr->type, hdr->src_node_id);
+	}
+}
+
+static void qrtr_log_rx_msg(struct qrtr_node *node, struct sk_buff *skb)
+{
+	const struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_cb *cb;
+	u64 pl_buf = 0;
+
+	if (!skb || !skb->data)
+		return;
+
+	cb = (struct qrtr_cb *)skb->cb;
+
+	if (cb->type == QRTR_TYPE_DATA) {
+		pl_buf = *(u64 *)(skb->data);
+		QRTR_INFO(node->ilc,
+			  "RX DATA: Len:0x%x CF:0x%x src[0x%x:0x%x] dst[0x%x:0x%x] [%08x %08x]\n",
+			  skb->len, cb->confirm_rx, cb->src_node, cb->src_port,
+			  cb->dst_node, cb->dst_port,
+			  (unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
+	} else {
+		pkt = (struct qrtr_ctrl_pkt *)(skb->data);
+		if (cb->type == QRTR_TYPE_NEW_SERVER ||
+		    cb->type == QRTR_TYPE_DEL_SERVER)
+			QRTR_INFO(node->ilc,
+				  "RX CTRL: cmd:0x%x SVC[0x%x:0x%x] addr[0x%x:0x%x]\n",
+				  cb->type, le32_to_cpu(pkt->server.service),
+				  le32_to_cpu(pkt->server.instance),
+				  le32_to_cpu(pkt->server.node),
+				  le32_to_cpu(pkt->server.port));
+		else if (cb->type == QRTR_TYPE_DEL_CLIENT ||
+			 cb->type == QRTR_TYPE_RESUME_TX)
+			QRTR_INFO(node->ilc,
+				  "RX CTRL: cmd:0x%x addr[0x%x:0x%x]\n",
+				  cb->type, le32_to_cpu(pkt->client.node),
+				  le32_to_cpu(pkt->client.port));
+		else if (cb->type == QRTR_TYPE_HELLO ||
+			 cb->type == QRTR_TYPE_BYE)
+			QRTR_INFO(node->ilc,
+				  "RX CTRL: cmd:0x%x node[0x%x]\n",
+				  cb->type, cb->src_node);
+	}
+}
+
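+/*
+ * rwsem analogue of kref_put_mutex(): drop a reference and, if it was
+ * the last one, return with the semaphore held for writing so that the
+ * release function runs under qrtr_node_lock.
+ */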
+static bool refcount_dec_and_rwsem_lock(refcount_t *r,
+					struct rw_semaphore *sem)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	down_write(sem);
+	if (!refcount_dec_and_test(r)) {
+		up_write(sem);
+		return false;
+	}
+
+	return true;
+}
+
+static inline int kref_put_rwsem_lock(struct kref *kref,
+				      void (*release)(struct kref *kref),
+				      struct rw_semaphore *sem)
+{
+	if (refcount_dec_and_rwsem_lock(&kref->refcount, sem)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
 
 /* Release node resources and free the node.
  *
@@ -149,14 +301,39 @@
  */
 static void __qrtr_node_release(struct kref *kref)
 {
+	struct qrtr_tx_flow_waiter *waiter;
+	struct qrtr_tx_flow_waiter *temp;
+	struct radix_tree_iter iter;
+	struct qrtr_tx_flow *flow;
 	struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
+	void __rcu **slot;
 
-	if (node->nid != QRTR_EP_NID_AUTO)
-		radix_tree_delete(&qrtr_nodes, node->nid);
+	if (node->nid != QRTR_EP_NID_AUTO) {
+		radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+			if (node == *slot)
+				radix_tree_delete(&qrtr_nodes, iter.index);
+		}
+	}
 
 	list_del(&node->item);
-	mutex_unlock(&qrtr_node_lock);
+	up_write(&qrtr_node_lock);
 
+	/* Free tx flow counters */
+	mutex_lock(&node->qrtr_tx_lock);
+	radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
+		flow = *slot;
+		list_for_each_entry_safe(waiter, temp, &flow->waiters, node) {
+			list_del(&waiter->node);
+			sock_put(waiter->sk);
+			kfree(waiter);
+		}
+		kfree(flow);
+		radix_tree_delete(&node->qrtr_tx_flow, iter.index);
+	}
+	mutex_unlock(&node->qrtr_tx_lock);
+
+	flush_work(&node->work);
 	skb_queue_purge(&node->rx_queue);
 	kfree(node);
 }
@@ -174,18 +351,159 @@
 {
 	if (!node)
 		return;
-	kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
+	kref_put_rwsem_lock(&node->ref, __qrtr_node_release, &qrtr_node_lock);
+}
+
+/**
+ * qrtr_tx_resume() - reset flow control counter
+ * @node:	qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
+ * @skb:	skb for resume tx control packet
+ */
+static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
+{
+	struct qrtr_tx_flow_waiter *waiter;
+	struct qrtr_tx_flow_waiter *temp;
+	struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_tx_flow *flow;
+	struct sockaddr_qrtr src;
+	struct qrtr_sock *ipc;
+	struct sk_buff *skbn;
+	unsigned long key;
+
+	pkt = (struct qrtr_ctrl_pkt *)skb->data;
+	if (le32_to_cpu(pkt->cmd) != QRTR_TYPE_RESUME_TX)
+		return;
+
+	src.sq_family = AF_QIPCRTR;
+	src.sq_node = le32_to_cpu(pkt->client.node);
+	src.sq_port = le32_to_cpu(pkt->client.port);
+	key = (u64)src.sq_node << 32 | src.sq_port;
+
+	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+	if (!flow)
+		return;
+
+	atomic_set(&flow->pending, 0);
+	wake_up_interruptible_all(&node->resume_tx);
+
+	mutex_lock(&node->qrtr_tx_lock);
+	list_for_each_entry_safe(waiter, temp, &flow->waiters, node) {
+		list_del(&waiter->node);
+		skbn = alloc_skb(0, GFP_KERNEL);
+		if (skbn) {
+			ipc = qrtr_sk(waiter->sk);
+			qrtr_local_enqueue(NULL, skbn, QRTR_TYPE_RESUME_TX,
+					   &src, &ipc->us, 0);
+		}
+		sock_put(waiter->sk);
+		kfree(waiter);
+	}
+	mutex_unlock(&node->qrtr_tx_lock);
+}
+
+/**
+ * qrtr_tx_wait() - flow control for outgoing packets
+ * @node:	qrtr_node that the packet is to be sent to
+ * @to:		address of the destination
+ * @sk:		socket sending the packet
+ * @type:	type of message
+ * @flags:	send flags, e.g. MSG_DONTWAIT
+ *
+ * The flow control scheme is based around the low and high "watermarks". When
+ * the low watermark is passed the confirm_rx flag is set on the outgoing
+ * message, which will trigger the remote to send a control message of the type
+ * QRTR_TYPE_RESUME_TX to reset the counter. If the high watermark is hit
+ * further transmission should be paused.
+ *
+ * Return: 1 if confirm_rx should be set, 0 otherwise or errno failure
+ */
+static int qrtr_tx_wait(struct qrtr_node *node, struct sockaddr_qrtr *to,
+			struct sock *sk, int type, unsigned int flags)
+{
+	struct qrtr_tx_flow_waiter *waiter;
+	struct qrtr_tx_flow *flow;
+	unsigned long key = (u64)to->sq_node << 32 | to->sq_port;
+	int confirm_rx = 0;
+	long timeo;
+	long ret;
+
+	/* Never set confirm_rx on non-data packets */
+	if (type != QRTR_TYPE_DATA)
+		return 0;
+
+	/* Assume sk is set correctly for all data type packets */
+	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+
+	mutex_lock(&node->qrtr_tx_lock);
+	flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
+	if (!flow) {
+		flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+		if (!flow) {
+			mutex_unlock(&node->qrtr_tx_lock);
+			return 1;
+		}
+		INIT_LIST_HEAD(&flow->waiters);
+		radix_tree_insert(&node->qrtr_tx_flow, key, flow);
+	}
+	mutex_unlock(&node->qrtr_tx_lock);
+
+	for (;;) {
+		ret = wait_event_interruptible_timeout(
+				node->resume_tx,
+				!node->ep ||
+				atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH,
+				timeo);
+		if (ret < 0)
+			return ret;
+		if (!ret)
+			return -EAGAIN;
+
+		if (!node->ep)
+			return -EPIPE;
+
+		mutex_lock(&node->qrtr_tx_lock);
+		if (atomic_read(&flow->pending) < QRTR_TX_FLOW_HIGH) {
+			atomic_inc(&flow->pending);
+			confirm_rx = atomic_read(&flow->pending) ==
+				     QRTR_TX_FLOW_LOW;
+			mutex_unlock(&node->qrtr_tx_lock);
+			break;
+		}
+		mutex_unlock(&node->qrtr_tx_lock);
+	}
+
+	if (confirm_rx) {
+		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
+		if (!waiter)
+			return -ENOMEM;
+		waiter->sk = sk;
+		sock_hold(sk);
+
+		mutex_lock(&node->qrtr_tx_lock);
+		list_add_tail(&waiter->node, &flow->waiters);
+		mutex_unlock(&node->qrtr_tx_lock);
+	}
+	return confirm_rx;
 }
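+
+/*
+ * Worked example of the watermark scheme above (illustrative): with
+ * QRTR_TX_FLOW_LOW = 5 and QRTR_TX_FLOW_HIGH = 10, the 5th
+ * unacknowledged data packet to a given node/port pair is sent with
+ * confirm_rx set.  If no QRTR_TYPE_RESUME_TX arrives before 10 packets
+ * are pending, qrtr_tx_wait() blocks (or fails with -EAGAIN for
+ * non-blocking senders) until qrtr_tx_resume() resets the counter.
+ */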
 
 /* Pass an outgoing packet socket buffer to the endpoint driver. */
 static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 			     int type, struct sockaddr_qrtr *from,
-			     struct sockaddr_qrtr *to)
+			     struct sockaddr_qrtr *to, unsigned int flags)
 {
 	struct qrtr_hdr_v1 *hdr;
+	int confirm_rx;
 	size_t len = skb->len;
 	int rc = -ENODEV;
 
+	/* Hold back all other traffic until the initial HELLO is sent */
+	if (!atomic_read(&node->hello_sent) && type != QRTR_TYPE_HELLO)
+		return rc;
+
+	confirm_rx = qrtr_tx_wait(node, to, skb->sk, type, flags);
+	if (confirm_rx < 0) {
+		kfree_skb(skb);
+		return confirm_rx;
+	}
+
 	hdr = skb_push(skb, sizeof(*hdr));
 	hdr->version = cpu_to_le32(QRTR_PROTO_VER_1);
 	hdr->type = cpu_to_le32(type);
@@ -200,9 +518,10 @@
 	}
 
 	hdr->size = cpu_to_le32(len);
-	hdr->confirm_rx = 0;
+	hdr->confirm_rx = !!confirm_rx;
 
-	skb_put_padto(skb, ALIGN(len, 4));
+	skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
+	qrtr_log_tx_msg(node, hdr, skb);
 
 	mutex_lock(&node->ep_lock);
 	if (node->ep)
@@ -211,6 +530,9 @@
 		kfree_skb(skb);
 	mutex_unlock(&node->ep_lock);
 
+	if (!rc && type == QRTR_TYPE_HELLO)
+		atomic_inc(&node->hello_sent);
+
 	return rc;
 }
 
@@ -222,10 +544,10 @@
 {
 	struct qrtr_node *node;
 
-	mutex_lock(&qrtr_node_lock);
+	down_read(&qrtr_node_lock);
 	node = radix_tree_lookup(&qrtr_nodes, nid);
 	node = qrtr_node_acquire(node);
-	mutex_unlock(&qrtr_node_lock);
+	up_read(&qrtr_node_lock);
 
 	return node;
 }
@@ -237,13 +559,31 @@
  */
 static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
 {
-	if (node->nid != QRTR_EP_NID_AUTO || nid == QRTR_EP_NID_AUTO)
+	struct qrtr_node *tnode = NULL;
+	char name[32] = {0,};
+
+	if (nid == QRTR_EP_NID_AUTO)
+		return;
+	if (nid == node->nid)
 		return;
 
-	mutex_lock(&qrtr_node_lock);
+	down_read(&qrtr_node_lock);
+	tnode = radix_tree_lookup(&qrtr_nodes, nid);
+	up_read(&qrtr_node_lock);
+	if (tnode)
+		return;
+
+	down_write(&qrtr_node_lock);
 	radix_tree_insert(&qrtr_nodes, nid, node);
-	node->nid = nid;
-	mutex_unlock(&qrtr_node_lock);
+
+	if (node->nid == QRTR_EP_NID_AUTO)
+		node->nid = nid;
+	up_write(&qrtr_node_lock);
+
+	if (!node->ilc) {
+		snprintf(name, sizeof(name), "qrtr_%d", nid);
+		node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0);
+	}
 }
 
 /**
@@ -317,10 +657,12 @@
 	if (len != ALIGN(size, 4) + hdrlen)
 		goto err;
 
-	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA)
+	if (cb->dst_port != QRTR_PORT_CTRL && cb->type != QRTR_TYPE_DATA &&
+	    cb->type != QRTR_TYPE_RESUME_TX)
 		goto err;
 
 	skb_put_data(skb, data + hdrlen, size);
+	qrtr_log_rx_msg(node, skb);
 
 	skb_queue_tail(&node->rx_queue, skb);
 	schedule_work(&node->work);
@@ -369,46 +711,34 @@
 {
 	struct qrtr_node *node = container_of(work, struct qrtr_node, work);
 	struct qrtr_ctrl_pkt *pkt;
-	struct sockaddr_qrtr dst;
-	struct sockaddr_qrtr src;
 	struct sk_buff *skb;
 
 	while ((skb = skb_dequeue(&node->rx_queue)) != NULL) {
 		struct qrtr_sock *ipc;
 		struct qrtr_cb *cb;
-		int confirm;
 
 		cb = (struct qrtr_cb *)skb->cb;
-		src.sq_node = cb->src_node;
-		src.sq_port = cb->src_port;
-		dst.sq_node = cb->dst_node;
-		dst.sq_port = cb->dst_port;
-		confirm = !!cb->confirm_rx;
-
 		qrtr_node_assign(node, cb->src_node);
 
-		ipc = qrtr_port_lookup(cb->dst_port);
-		if (!ipc) {
-			kfree_skb(skb);
-		} else {
-			if (sock_queue_rcv_skb(&ipc->sk, skb))
-				kfree_skb(skb);
-
-			qrtr_port_put(ipc);
+		if (cb->type == QRTR_TYPE_NEW_SERVER &&
+		    skb->len == sizeof(*pkt)) {
+			pkt = (void *)skb->data;
+			qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
 		}
 
-		if (confirm) {
-			skb = qrtr_alloc_ctrl_packet(&pkt);
-			if (!skb)
-				break;
+		if (cb->type == QRTR_TYPE_RESUME_TX) {
+			qrtr_tx_resume(node, skb);
+			consume_skb(skb);
+		} else {
+			ipc = qrtr_port_lookup(cb->dst_port);
+			if (!ipc) {
+				kfree_skb(skb);
+			} else {
+				if (sock_queue_rcv_skb(&ipc->sk, skb))
+					kfree_skb(skb);
 
-			pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
-			pkt->client.node = cpu_to_le32(dst.sq_node);
-			pkt->client.port = cpu_to_le32(dst.sq_port);
-
-			if (qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
-					      &dst, &src))
-				break;
+				qrtr_port_put(ipc);
+			}
 		}
 	}
 }
@@ -438,12 +768,17 @@
 	skb_queue_head_init(&node->rx_queue);
 	node->nid = QRTR_EP_NID_AUTO;
 	node->ep = ep;
+	atomic_set(&node->hello_sent, 0);
+
+	mutex_init(&node->qrtr_tx_lock);
+	INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
+	init_waitqueue_head(&node->resume_tx);
 
 	qrtr_node_assign(node, nid);
 
-	mutex_lock(&qrtr_node_lock);
-	list_add(&node->item, &qrtr_all_nodes);
-	mutex_unlock(&qrtr_node_lock);
+	down_write(&qrtr_node_lock);
+	list_add(&node->item, &qrtr_all_epts);
+	up_write(&qrtr_node_lock);
 	ep->node = node;
 
 	return 0;
@@ -456,22 +791,36 @@
  */
 void qrtr_endpoint_unregister(struct qrtr_endpoint *ep)
 {
+	struct radix_tree_iter iter;
 	struct qrtr_node *node = ep->node;
 	struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
 	struct sockaddr_qrtr dst = {AF_QIPCRTR, qrtr_local_nid, QRTR_PORT_CTRL};
 	struct qrtr_ctrl_pkt *pkt;
 	struct sk_buff *skb;
+	void __rcu **slot;
 
 	mutex_lock(&node->ep_lock);
 	node->ep = NULL;
 	mutex_unlock(&node->ep_lock);
 
 	/* Notify the local controller about the event */
-	skb = qrtr_alloc_ctrl_packet(&pkt);
-	if (skb) {
+	down_read(&qrtr_node_lock);
+	radix_tree_for_each_slot(slot, &qrtr_nodes, &iter, 0) {
+		if (node != *slot)
+			continue;
+
+		skb = qrtr_alloc_ctrl_packet(&pkt);
+		if (!skb)
+			continue;
+
+		src.sq_node = iter.index;
 		pkt->cmd = cpu_to_le32(QRTR_TYPE_BYE);
-		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst);
+		qrtr_local_enqueue(NULL, skb, QRTR_TYPE_BYE, &src, &dst, 0);
 	}
+	up_read(&qrtr_node_lock);
+
+	/* Wake up any transmitters waiting for resume-tx from the node */
+	wake_up_interruptible_all(&node->resume_tx);
 
 	qrtr_node_release(node);
 	ep->node = NULL;
@@ -504,29 +853,59 @@
 	sock_put(&ipc->sk);
 }
 
-/* Remove port assignment. */
-static void qrtr_port_remove(struct qrtr_sock *ipc)
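+/* Notify peers that this client is going away; the scope of the
+ * DEL_CLIENT message depends on which node(s) the socket actually
+ * reached, as recorded in ipc->state.
+ */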
+static void qrtr_send_del_client(struct qrtr_sock *ipc)
 {
 	struct qrtr_ctrl_pkt *pkt;
-	struct sk_buff *skb;
-	int port = ipc->us.sq_port;
 	struct sockaddr_qrtr to;
+	struct qrtr_node *node;
+	struct sk_buff *skbn;
+	struct sk_buff *skb;
+	int type = QRTR_TYPE_DEL_CLIENT;
+
+	skb = qrtr_alloc_ctrl_packet(&pkt);
+	if (!skb)
+		return;
 
 	to.sq_family = AF_QIPCRTR;
 	to.sq_node = QRTR_NODE_BCAST;
 	to.sq_port = QRTR_PORT_CTRL;
 
-	skb = qrtr_alloc_ctrl_packet(&pkt);
-	if (skb) {
-		pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
-		pkt->client.node = cpu_to_le32(ipc->us.sq_node);
-		pkt->client.port = cpu_to_le32(ipc->us.sq_port);
+	pkt->cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT);
+	pkt->client.node = cpu_to_le32(ipc->us.sq_node);
+	pkt->client.port = cpu_to_le32(ipc->us.sq_port);
 
-		skb_set_owner_w(skb, &ipc->sk);
-		qrtr_bcast_enqueue(NULL, skb, QRTR_TYPE_DEL_CLIENT, &ipc->us,
-				   &to);
+	skb_set_owner_w(skb, &ipc->sk);
+
+	if (ipc->state == QRTR_STATE_MULTI) {
+		qrtr_bcast_enqueue(NULL, skb, type, &ipc->us, &to, 0);
+		return;
 	}
 
+	if (ipc->state > QRTR_STATE_INIT) {
+		node = qrtr_node_lookup(ipc->state);
+		if (!node)
+			goto exit;
+
+		skbn = skb_clone(skb, GFP_KERNEL);
+		if (!skbn) {
+			qrtr_node_release(node);
+			goto exit;
+		}
+
+		skb_set_owner_w(skbn, &ipc->sk);
+		qrtr_node_enqueue(node, skbn, type, &ipc->us, &to, 0);
+		qrtr_node_release(node);
+	}
+exit:
+	qrtr_local_enqueue(NULL, skb, type, &ipc->us, &to, 0);
+}
+
+/* Remove port assignment. */
+static void qrtr_port_remove(struct qrtr_sock *ipc)
+{
+	int port = ipc->us.sq_port;
+
+	qrtr_send_del_client(ipc);
 	if (port == QRTR_PORT_CTRL)
 		port = 0;
 
@@ -553,17 +932,18 @@
 
 	mutex_lock(&qrtr_port_lock);
 	if (!*port) {
-		rc = idr_alloc(&qrtr_ports, ipc,
-			       QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
-			       GFP_ATOMIC);
+		rc = idr_alloc_cyclic(&qrtr_ports, ipc, QRTR_MIN_EPH_SOCKET,
+				      QRTR_MAX_EPH_SOCKET + 1, GFP_ATOMIC);
 		if (rc >= 0)
 			*port = rc;
-	} else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
+	} else if (*port < QRTR_MIN_EPH_SOCKET &&
+		   !(capable(CAP_NET_ADMIN) || in_egroup_p(AID_VENDOR_QRTR))) {
 		rc = -EACCES;
 	} else if (*port == QRTR_PORT_CTRL) {
 		rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
 	} else {
-		rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
+		rc = idr_alloc_cyclic(&qrtr_ports, ipc, *port, *port + 1,
+				      GFP_ATOMIC);
 		if (rc >= 0)
 			*port = rc;
 	}
@@ -674,12 +1054,16 @@
 /* Queue packet to local peer socket. */
 static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 			      int type, struct sockaddr_qrtr *from,
-			      struct sockaddr_qrtr *to)
+			      struct sockaddr_qrtr *to, unsigned int flags)
 {
 	struct qrtr_sock *ipc;
 	struct qrtr_cb *cb;
 
 	ipc = qrtr_port_lookup(to->sq_port);
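+	/* Silently drop control messages when no control socket exists */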
+	if (!ipc && to->sq_port == QRTR_PORT_CTRL) {
+		kfree_skb(skb);
+		return 0;
+	}
 	if (!ipc || &ipc->sk == skb->sk) { /* do not send to self */
 		kfree_skb(skb);
 		return -ENODEV;
@@ -703,21 +1087,23 @@
 /* Queue packet for broadcast. */
 static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
 			      int type, struct sockaddr_qrtr *from,
-			      struct sockaddr_qrtr *to)
+			      struct sockaddr_qrtr *to, unsigned int flags)
 {
 	struct sk_buff *skbn;
 
-	mutex_lock(&qrtr_node_lock);
-	list_for_each_entry(node, &qrtr_all_nodes, item) {
+	down_read(&qrtr_node_lock);
+	list_for_each_entry(node, &qrtr_all_epts, item) {
+		if (node->nid == QRTR_EP_NID_AUTO)
+			continue;
 		skbn = skb_clone(skb, GFP_KERNEL);
 		if (!skbn)
 			break;
 		skb_set_owner_w(skbn, skb->sk);
-		qrtr_node_enqueue(node, skbn, type, from, to);
+		qrtr_node_enqueue(node, skbn, type, from, to, flags);
 	}
-	mutex_unlock(&qrtr_node_lock);
+	up_read(&qrtr_node_lock);
 
-	qrtr_local_enqueue(node, skb, type, from, to);
+	qrtr_local_enqueue(node, skb, type, from, to, flags);
 
 	return 0;
 }
@@ -726,7 +1112,8 @@
 {
 	DECLARE_SOCKADDR(struct sockaddr_qrtr *, addr, msg->msg_name);
 	int (*enqueue_fn)(struct qrtr_node *, struct sk_buff *, int,
-			  struct sockaddr_qrtr *, struct sockaddr_qrtr *);
+			  struct sockaddr_qrtr *, struct sockaddr_qrtr *,
+			  unsigned int);
 	struct qrtr_sock *ipc = qrtr_sk(sock->sk);
 	struct sock *sk = sock->sk;
 	struct qrtr_node *node;
@@ -782,6 +1169,11 @@
 			release_sock(sk);
 			return -ECONNRESET;
 		}
+
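+		/* Track which remote(s) this socket has transmitted to */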
+		if (ipc->state > QRTR_STATE_INIT && ipc->state != node->nid)
+			ipc->state = QRTR_STATE_MULTI;
+		else if (ipc->state == QRTR_STATE_INIT)
+			ipc->state = node->nid;
 	}
 
 	plen = (len + 3) & ~3;
@@ -810,7 +1202,7 @@
 		type = le32_to_cpu(type);
 	}
 
-	rc = enqueue_fn(node, skb, type, &ipc->us, addr);
+	rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags);
 	if (rc >= 0)
 		rc = len;
 
@@ -821,6 +1213,36 @@
 	return rc;
 }
 
+static int qrtr_resume_tx(struct qrtr_cb *cb)
+{
+	struct sockaddr_qrtr remote = { AF_QIPCRTR,
+					cb->src_node, cb->src_port };
+	struct sockaddr_qrtr local = { AF_QIPCRTR, cb->dst_node, cb->dst_port };
+	struct qrtr_ctrl_pkt *pkt;
+	struct qrtr_node *node;
+	struct sk_buff *skb;
+	int ret;
+
+	node = qrtr_node_lookup(remote.sq_node);
+	if (!node)
+		return -EINVAL;
+
+	skb = qrtr_alloc_ctrl_packet(&pkt);
+	if (!skb)
+		return -ENOMEM;
+
+	pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
+	pkt->client.node = cpu_to_le32(cb->dst_node);
+	pkt->client.port = cpu_to_le32(cb->dst_port);
+
+	ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX,
+				&local, &remote, 0);
+
+	qrtr_node_release(node);
+
+	return ret;
+}
+
 static int qrtr_recvmsg(struct socket *sock, struct msghdr *msg,
 			size_t size, int flags)
 {
@@ -843,6 +1265,7 @@
 		release_sock(sk);
 		return rc;
 	}
+	cb = (struct qrtr_cb *)skb->cb;
 
 	copied = skb->len;
 	if (copied > size) {
@@ -856,7 +1279,6 @@
 	rc = copied;
 
 	if (addr) {
-		cb = (struct qrtr_cb *)skb->cb;
 		addr->sq_family = AF_QIPCRTR;
 		addr->sq_node = cb->src_node;
 		addr->sq_port = cb->src_port;
@@ -864,6 +1286,9 @@
 	}
 
 out:
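+	/* Honour the sender's confirm_rx request before freeing the skb */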
+	if (cb->confirm_rx)
+		qrtr_resume_tx(cb);
+
 	skb_free_datagram(sk, skb);
 	release_sock(sk);
 
@@ -1006,7 +1431,7 @@
 	if (!sock_flag(sk, SOCK_DEAD))
 		sk->sk_state_change(sk);
 
-	sock_set_flag(sk, SOCK_DEAD);
+	sock_orphan(sk);
 	sock->sk = NULL;
 
 	if (!sock_flag(sk, SOCK_ZAPPED))
@@ -1069,6 +1494,7 @@
 	ipc->us.sq_family = AF_QIPCRTR;
 	ipc->us.sq_node = qrtr_local_nid;
 	ipc->us.sq_port = 0;
+	ipc->state = QRTR_STATE_INIT;
 
 	return 0;
 }
@@ -1087,9 +1513,6 @@
 	if (!netlink_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (!netlink_capable(skb, CAP_SYS_ADMIN))
-		return -EPERM;
-
 	ASSERT_RTNL();
 
 	rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy, extack);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index f62c812..ac9a7e1 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1363,6 +1363,7 @@
 	}
 	if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
 	    tu->last_resolution != resolution) {
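+		/* Zero-fill r1 so struct padding is not leaked to userspace */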
+		memset(&r1, 0, sizeof(r1));
 		r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
 		r1.tstamp = tstamp;
 		r1.val = resolution;
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
index a63ceb1..89f3d80 100644
--- a/sound/soc/soc-ops.c
+++ b/sound/soc/soc-ops.c
@@ -374,7 +374,7 @@
 	unsigned int rshift = mc->rshift;
 	int max = mc->max;
 	int min = mc->min;
-	unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+	unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
 	unsigned int val;
 	int ret;
 
@@ -419,7 +419,7 @@
 	unsigned int rshift = mc->rshift;
 	int max = mc->max;
 	int min = mc->min;
-	unsigned int mask = (1 << (fls(min + max) - 1)) - 1;
+	unsigned int mask = (1U << (fls(min + max) - 1)) - 1;
 	int err = 0;
 	unsigned int val, val_mask, val2 = 0;